diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 878ca54fb36f44a5763ffe63e41d63fc9d814451..160d32084c2a2d4b47b1b176c066f3d822265357 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -692,73 +692,6 @@ class PVString<int nf, bit signed> {
                    !eq(nf, 8): !if(signed, "PvPvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUvPUv"));
 }
 
-multiclass RVVStridedSegLoad<string op> {
-  foreach type = TypeList in {
-    defvar eew = !cond(!eq(type, "c") : "8",
-                       !eq(type, "s") : "16",
-                       !eq(type, "i") : "32",
-                       !eq(type, "l") : "64",
-                       !eq(type, "x") : "16",
-                       !eq(type, "f") : "32",
-                       !eq(type, "d") : "64");
-    foreach nf = NFList in {
-      let Name = op # nf # "e" # eew # "_v",
-          IRName = op # nf,
-          MaskedIRName = op # nf # "_mask",
-          NF = nf,
-          ManualCodegen = [{
-    {
-      ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
-      IntrinsicTypes = {ResultType, Ops.back()->getType()};
-      SmallVector<llvm::Value*, 12> Operands;
-
-      // Please refer to comment under 'defvar NFList' in this file
-      if ((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
-          (!IsMasked && PolicyAttrs & RVV_VTA))
-        Operands.append(NF, llvm::PoisonValue::get(ResultType));
-      else {
-        if (IsMasked)
-          Operands.append(Ops.begin() + NF + 1, Ops.begin() + 2 * NF + 1);
-        else // Unmasked
-          Operands.append(Ops.begin() + NF, Ops.begin() + 2 * NF);
-      }
-      unsigned PtrOperandIdx = IsMasked ?
-          ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ? NF + 1 : 2 * NF + 1 :
-          (PolicyAttrs & RVV_VTA) ? NF : 2 * NF;
-      Value *PtrOperand = Ops[PtrOperandIdx];
-      Value *StrideOperand = Ops[PtrOperandIdx + 1];
-      Value *VLOperand = Ops[PtrOperandIdx + 2];
-      Operands.push_back(PtrOperand);
-      Operands.push_back(StrideOperand);
-      if (IsMasked)
-        Operands.push_back(Ops[NF]);
-      Operands.push_back(VLOperand);
-      if (IsMasked)
-        Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
-      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
-      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
-      clang::CharUnits Align =
-          CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
-      llvm::Value *V;
-      for (unsigned I = 0; I < NF; ++I) {
-        llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {I});
-        V = Builder.CreateStore(Val, Address(Ops[I], Val->getType(), Align));
-      }
-      return V;
-    }
-    }] in {
-        defvar PV = PVString<nf, /*signed=*/true>.S;
-        defvar PUV = PVString<nf, /*signed=*/false>.S;
-        def : RVVBuiltin<"v", "0" # PV # "PCe" # "t", type>;
-        if !not(IsFloat<type>.val) then {
-          def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # "t", type>;
-        }
-      }
-    }
-  }
-}
-
 multiclass RVVIndexedSegLoad<string op> {
   foreach type = TypeList in {
     foreach eew_info = EEWList in {
@@ -1320,7 +1253,6 @@ defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
 
 // 7.8 Vector Load/Store Segment Instructions
 let UnMaskedPolicyScheme = HasPassthruOperand in {
-defm : RVVStridedSegLoad<"vlsseg">;
 defm : RVVIndexedSegLoad<"vluxseg">;
 defm : RVVIndexedSegLoad<"vloxseg">;
 }
@@ -1518,8 +1450,7 @@ multiclass RVVStridedSegLoadTuple<string op> {
                        !eq(type, "f") : "32",
                        !eq(type, "d") : "64");
     foreach nf = NFList in {
-      let Name = op # nf # "e" # eew # "_v_tuple",
-          OverloadedName = op # nf # "e" # eew # "_tuple",
+      let Name = op # nf # "e" # eew # "_v",
           IRName = op # nf,
           MaskedIRName = op # nf # "_mask",
           NF = nf,
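With RVVStridedSegLoad removed, the tuple-returning definitions in RVVStridedSegLoadTuple take over the plain vlsseg<nf>e<eew>_v builtin names: the segments are no longer written back through output pointers but come back as a single x<nf> tuple value. A minimal before/after sketch of the user-facing change (the function names here are illustrative, and the __riscv_vget_v_f16m1x2_f16m1 accessor is assumed from the matching tuple-type intrinsics rather than shown in this patch):

#include <riscv_vector.h>

/* Before this patch: each segment was returned through an output pointer. */
void two_segments_old(vfloat16m1_t *v0, vfloat16m1_t *v1,
                      const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  __riscv_vlsseg2e16_v_f16m1(v0, v1, base, bstride, vl); /* API removed here */
}

/* After this patch: both segments arrive as one vector tuple value. */
vfloat16m1_t two_segments_new(const _Float16 *base, ptrdiff_t bstride,
                              size_t vl) {
  vfloat16m1x2_t v = __riscv_vlsseg2e16_v_f16m1x2(base, bstride, vl);
  return __riscv_vget_v_f16m1x2_f16m1(v, 0); /* segment 0 (assumed accessor) */
}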
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c
index 3e5db78c4137273db7730b84510822e97a461b8c..fd3cad742f333192d2f9a430035b04abc07ba02f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c
@@ -7,423 +7,303 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg2e16_v_f16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg2e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf4(v0, v1, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf4x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg2e16_v_f16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]]
 //
-void test_vlsseg2e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf2(v0, v1, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf2x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
 //
-void test_vlsseg2e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m1(v0, v1, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m1x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg2e16_v_f16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]]
 //
-void test_vlsseg2e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m2(v0, v1, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m2x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x half>, <vscale x 16 x half> } @test_vlsseg2e16_v_f16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]]
 //
-void test_vlsseg2e16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m4(v0, v1, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m4x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_i16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf4(v0, v1, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf4x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_i16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf2(v0, v1, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf2x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_i16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m1(v0, v1, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m1x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_i16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m2(v0, v1, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m2x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_i16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m4(v0, v1, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m4x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_u16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf4(v0, v1, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf4x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_u16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf2(v0, v1, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e16_v_u16mf2x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_u16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m1(v0, v1, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m1x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_u16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m2(v0, v1, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m2x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_u16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m4(v0, v1, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m4x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg2e16_v_f16mf4x2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf4_m(v0, v1, mask, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf4x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg2e16_v_f16mf2x2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]]
 //
-void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf2_m(v0, v1, mask, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
 //
-void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m1_m(v0, v1, mask, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m1x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg2e16_v_f16m2x2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]]
 //
-void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m2_m(v0, v1, mask, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x half>, <vscale x 16 x half> } @test_vlsseg2e16_v_f16m4x2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]]
 //
-void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m4_m(v0, v1, mask, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m4x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_i16mf4x2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf4_m(v0, v1, mask, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf4x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_i16mf2x2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf2_m(v0, v1, mask, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e16_v_i16mf2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_i16m1x2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m1_m(v0, v1, mask, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m1x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_i16m2x2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m2_m(v0, v1, mask, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_i16m4x2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m4_m(v0, v1, mask, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m4x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_u16mf4x2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf4_m(v0, v1, mask, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf4x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_u16mf2x2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf2_m(v0, v1, mask, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_u16m1x2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m1_m(v0, v1, mask, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m1x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_u16m2x2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m2_m(v0, v1, mask, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_u16m4x2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m4_m(v0, v1, mask, base, bstride, vl);
-}
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m4x2_m(mask, base, bstride, vl);
+}
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2(v0, v1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4(v0, v1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2(v0, v1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1(v0, v1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2(v0, v1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4(v0, v1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2(base, bstride, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2(v0, v1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1(v0, v1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2(v0, v1, 
base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4(v0, v1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2_m(v0, v1, mask, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]] // -void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1_m(v0, v1, mask, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg2e32_v_f32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]] // -void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2_m(v0, v1, mask, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x float>, <vscale x 8 x float> } @test_vlsseg2e32_v_f32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]] // -void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4_m(v0, v1, mask, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_m(vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> }
@test_vlsseg2e32_v_i32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]] // -void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2_m(v0, v1, mask, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_i32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]] // -void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1_m(v0, v1, mask, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_i32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]] // -void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2_m(v0, v1, mask, base,
bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_i32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]] // -void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4_m(v0, v1, mask, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_u32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]] // -void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2_m(v0, v1, mask, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_u32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT:
[[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]] // -void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1_m(v0, v1, mask, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_u32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]] // -void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2_m(v0, v1, mask, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_u32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]] // -void test_vlsseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4_m(v0, v1, mask, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32_tuple.c
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32_tuple.c deleted file mode 100644 index abc3b2e7ec2affa057914148d23c32937930d256..0000000000000000000000000000000000000000 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32_tuple.c +++ /dev/null @@ -1,248 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 -// REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ -// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ -// RUN: FileCheck --check-prefix=CHECK-RV64 %s - -#include <riscv_vector.h> - -// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg2e32_v_f32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2(const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32mf2x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg2e32_v_f32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2(const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m1x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg2e32_v_f32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2(const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m2x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x float>, <vscale x 8 x float> } @test_vlsseg2e32_v_f32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2(const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m4x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_i32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32mf2x2(base, bstride,
vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_i32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_i32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_i32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_u32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_u32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m1x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_u32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m2x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_u32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]])
#[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m4x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg2e32_v_f32mf2x2_m -// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32mf2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg2e32_v_f32m1x2_m -// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m1x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg2e32_v_f32m2x2_m -// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x float>, <vscale x 8 x float> } @test_vlsseg2e32_v_f32m4x2_m -// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_m(vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m4x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_i32mf2x2_m -// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32mf2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local
{ <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_i32m1x2_m -// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_i32m2x2_m -// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_i32m4x2_m -// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_u32mf2x2_m -// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_u32m1x2_m -// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m1x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_u32m2x2_m -// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]] -// -vuint32m2x2_t
test_vlsseg2e32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_u32m4x2_m -// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m4x2_m(mask, base, bstride, vl); -} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c index 0efaf5e21bb0ec23bdd81c49790ae0b65ae28fa0..d0a9bfbae197da8171dfc424c7112c143bb0ed25 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c @@ -7,255 +7,183 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg2e64_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]] // -void test_vlsseg2e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1(v0, v1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg2e64_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]] // -void test_vlsseg2e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const
double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2(v0, v1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x double>, <vscale x 4 x double> } @test_vlsseg2e64_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]] // -void test_vlsseg2e64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4(v0, v1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1(v0, v1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr
[[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2(v0, v1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4(v0, v1, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_u64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m1(v0, v1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_u64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0 -//
CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m2(v0, v1, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m4(v0, v1, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg2e64_v_f64m1x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]] // -void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1_m(v0, v1, mask, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg2e64_v_f64m2x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]] // -void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2_m(v0, v1, mask, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x double>, <vscale x 4 x double> } @test_vlsseg2e64_v_f64m4x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]] // -void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4_m(v0, v1, mask, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_m(vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_i64m1x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1_m(v0, v1, mask, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_m(vbool64_t mask, const
int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_i64m2x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2_m(v0, v1, mask, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_i64m4x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4_m(v0, v1, mask, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_u64m1x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]],
align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m1_m(v0, v1, mask, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_u64m2x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m2_m(v0, v1, mask, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]] // -void test_vlsseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m4_m(v0, v1, mask, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m4x2_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c index
6c12dc251fd6afd8e7227852ebd00244f8f56299..5e7a2dc432b9c1988fd353cadec8b202e70a0717 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c @@ -6,339 +6,243 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg2e8_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]] // -void test_vlsseg2e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf8(v0, v1, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf8x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg2e8_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]] // -void test_vlsseg2e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf4(v0, v1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg2e8_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg2.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1 -//
CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]] // -void test_vlsseg2e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf2(v0, v1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg2e8_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg2.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]] // -void test_vlsseg2e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m1(v0, v1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg2e8_v_i8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg2.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]] // -void test_vlsseg2e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m2(v0, v1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { <vscale x 32 x i8>, <vscale x 32 x i8> } @test_vlsseg2e8_v_i8m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0 -// CHECK-RV64-NEXT:
store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m4(v0, v1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf8(v0, v1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf8x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf4(v0, v1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( poison, poison, ptr 
[[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf2(v0, v1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m1(v0, v1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m2(v0, v1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m4(v0, v1, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf8_m(v0, v1, mask, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf8x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf4_m(v0, v1, mask, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_m -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf2_m(v0, v1, mask, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m1_m(v0, v1, mask, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void 
test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m2_m(v0, v1, mask, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m4_m(v0, v1, mask, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf8_m(v0, v1, mask, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf8x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlsseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf4_m(v0, v1, mask, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf2_m(v0, v1, mask, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m1_m(v0, v1, mask, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_m -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m2_m(v0, v1, mask, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m4_m(v0, v1, mask, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m4x2_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c index 1db9cf1205e1cdcb6ab0967c88750d1f3e9f1342..74960833e5e336a897471d30dacbb74fc4e2955f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c @@ -7,387 +7,243 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf4(v0, v1, v2, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf4x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg3e16_v_f16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg3.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf2(v0, v1, v2, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg3e16_v_f16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg3.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m1(v0, v1, v2, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m1x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg3e16_v_f16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg3.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m2(v0, v1, v2, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_i16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf4(v0, v1, v2, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf4x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_i16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf2(v0, v1, v2, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_i16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m1(v0, v1, v2, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m1x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_i16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m2(v0, v1, v2, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_u16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf4(v0, v1, v2, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16mf4x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_u16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf2(v0, v1, v2, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16mf2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_u16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m1(v0, v1, v2, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m1x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_u16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m2(v0, v1, v2, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg3e16_v_f16mf4x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf4_m(v0, v1, v2, mask, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf4x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg3e16_v_f16mf2x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg3.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf2_m(v0, v1, v2, mask, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_f16mf2x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg3e16_v_f16m1x3_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg3.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m1_m(v0, v1, v2, mask, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m1x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg3e16_v_f16m2x3_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg3.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m2_m(v0, v1, v2, mask, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_f16m2x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_i16mf4x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf4_m(v0, v1, v2, mask, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_i16mf4x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_i16mf2x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf2_m(v0, v1, v2, mask, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_i16mf2x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_i16m1x3_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m1_m(v0, v1, v2, mask, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_i16m1x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_i16m2x3_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m2_m(v0, v1, v2, mask, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_i16m2x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_u16mf4x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf4_m(v0, v1, v2, mask, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_u16mf4x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_u16mf2x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf2_m(v0, v1, v2, mask, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_u16mf2x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_u16m1x3_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m1_m(v0, v1, v2, mask, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_u16m1x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_u16m2x3_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m2_m(v0, v1, v2, mask, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_u16m2x3_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c
ece187e9dfeb9840a670255326e533427393fa67..c21482f5445c160dfce6ad33b76b336a89406aae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c @@ -7,291 +7,183 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32mf2(v0, v1, v2, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32mf2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m1(v0, v1, v2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3 +// CHECK-RV64-SAME: (ptr
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m2(v0, v1, v2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32mf2(v0, v1, v2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32mf2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: 
ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m1(v0, v1, v2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m2(v0, v1, v2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32mf2(v0, v1, v2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32mf2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e32_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m1(v0, v1, v2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m2(v0, v1, v2, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32mf2_m(v0, v1, v2, mask, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32mf2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m1_m(v0, v1, v2, mask, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m2_m(v0, v1, v2, mask, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + 
return __riscv_vlsseg3e32_v_f32m2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32mf2_m(v0, v1, v2, mask, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32mf2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m1_m(v0, v1, v2, mask, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m2_m(v0, v1, v2, mask, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32mf2_m(v0, v1, v2, mask, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32mf2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m1_m(v0, v1, v2, mask, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m2_m(v0, v1, v2, mask, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m2x3_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c index 7759d088a4f5ef76bdcc5ea9682846fdddd3e969..04a48f93ce7c3bae21f3967c9beb4216432151cf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c @@ -7,195 +7,123 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT:
store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m1(v0, v1, v2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m2(v0, v1, v2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m1(v0, v1, v2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m2(v0, v1, v2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m1(v0, v1, v2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m2(v0, v1, v2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m1_m(v0, v1, v2, mask, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m2_m(v0, v1, v2, mask, base, bstride, vl); +vfloat64m2x3_t 
test_vlsseg3e64_v_f64m2x3_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m1_m(v0, v1, v2, mask, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m2_m(v0, v1, v2, mask, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e64_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m1_m(v0, v1, v2, mask, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m2_m(v0, v1, v2, mask, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m2x3_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c index 2d6a2be8dff539983cd415febfa4a96f31627a4b..3449c4b577c398fd7025794a1c287fb503fcbb01 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c @@ -6,323 +6,203 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +//
CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf8(v0, v1, v2, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf8x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf4(v0, v1, v2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf4x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf2(v0, v1, v2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m1(v0, v1, v2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m2(v0, v1, v2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e8_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf8(v0, v1, v2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf8x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf4(v0, v1, v2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf4x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: 
ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf2(v0, v1, v2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m1(v0, v1, v2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m2(v0, v1, v2, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e8_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf8_m(v0, v1, v2, mask, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf8x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf4_m(v0, v1, v2, mask, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf4x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf2_m(v0, v1, v2, mask, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m1_m(v0, v1, v2, mask, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m2_m(v0, v1, v2, mask, base, bstride, vl); +vint8m2x3_t 
test_vlsseg3e8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf8_m(v0, v1, v2, mask, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf8x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf4_m(v0, v1, v2, mask, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf4x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e8_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf2_m(v0, v1, v2, mask, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m1_m(v0, v1, v2, mask, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m2_m(v0, v1, v2, mask, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m2x3_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c index c53b891e74665727a92056768ec0f62ca3807103..1828de3584041d9586901082e66be469d85a3319 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c @@ -7,435 +7,243 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16mf4(v0, v1, v2, v3, base, bstride, vl); +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf4x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16mf2(v0, v1, v2, v3, base, bstride, vl); +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16m1(v0, v1, v2, v3, base, bstride, vl); +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m1x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8f16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16m2(v0, v1, v2, v3, base, bstride, vl); +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16mf4(v0, v1, v2, v3, base, bstride, vl); +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf4x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void 
test_vlsseg4e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16mf2(v0, v1, v2, v3, base, bstride, vl); +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16m1(v0, v1, v2, v3, base, bstride, vl); +vint16m1x4_t test_vlsseg4e16_v_i16m1x4(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m1x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16m2(v0, v1, v2, v3, base, bstride, vl); +vint16m2x4_t test_vlsseg4e16_v_i16m2x4(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg4e16_v_i16m2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16mf4(v0, v1, v2, v3, base, bstride, vl); +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf4x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16mf2(v0, v1, v2, v3, base, bstride, vl); +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16m1(v0, v1, v2, v3, base, bstride, vl); +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m1x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16m2(v0, v1, v2, v3, base, bstride, vl); +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf4x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.mask.nxv4f16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m1x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE]], 
i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf4x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m1x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf4x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m1x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m2x4_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c index e863b1094ab2b9ea8b15e7dc38039be0813f1d12..87fac712d7cccef48e496df78888e6e45d174133 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c @@ -7,327 +7,183 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { 
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg4.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
//
-void test_vlsseg4e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32mf2(v0, v1, v2, v3, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4(const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32mf2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg4e32_v_f32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg4.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]]
//
-void test_vlsseg4e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m1(v0, v1, v2, v3, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4(const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32m1x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg4e32_v_f32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg4.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]]
//
-void test_vlsseg4e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m2(v0, v1, v2, v3, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4(const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32m2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg4e32_v_i32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32mf2(v0, v1, v2, v3, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4(const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32mf2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg4e32_v_i32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m1(v0, v1, v2, v3, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4(const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32m1x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg4e32_v_i32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m2(v0, v1, v2, v3, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4(const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32m2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg4e32_v_u32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32mf2(v0, v1, v2, v3, base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_u32mf2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg4e32_v_u32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32m1(v0, v1, v2, v3, base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_u32m1x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg4e32_v_u32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32m2(v0, v1, v2, v3, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_u32m2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg4e32_v_f32mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg4.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
//
-void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32mf2x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg4e32_v_f32m1x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg4.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]]
//
-void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32m1x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg4e32_v_f32m2x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg4.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]]
//
-void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32m2x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg4e32_v_i32mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32mf2x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg4e32_v_i32m1x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32m1x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg4e32_v_i32m2x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32m2x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg4e32_v_u32mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_u32mf2x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg4e32_v_u32m1x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_u32m1x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg4e32_v_u32m2x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]]
//
-void test_vlsseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e32_v_u32m2x4_m(mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c
index 6f0b0992f8e543557f842663e2f1a5beffcc6417..d6b7f483ee44ef692ae9a3a4134c187314b558a7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c
@@ -7,219 +7,123 @@
#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg4e64_v_f64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg4.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
//
-void test_vlsseg4e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_f64m1(v0, v1, v2, v3, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4(const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_f64m1x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg4e64_v_f64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg4.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]]
//
-void test_vlsseg4e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_f64m2(v0, v1, v2, v3, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4(const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_f64m2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg4e64_v_i64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg4.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
//
-void test_vlsseg4e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_i64m1(v0, v1, v2, v3, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4(const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_i64m1x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg4e64_v_i64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
//
-void test_vlsseg4e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_i64m2(v0, v1, v2, v3, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4(const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_i64m2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg4e64_v_u64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg4.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
//
-void test_vlsseg4e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_u64m1(v0, v1, v2, v3, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_u64m1x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg4e64_v_u64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
//
-void test_vlsseg4e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_u64m2(v0, v1, v2, v3, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_u64m2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg4e64_v_f64m1x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg4.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
//
-void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_f64m1x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg4e64_v_f64m2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg4.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]]
//
-void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_f64m2x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg4e64_v_i64m1x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg4.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
//
-void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_i64m1x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg4e64_v_i64m2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
//
-void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_i64m2x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg4e64_v_u64m1x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg4.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
//
-void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_u64m1x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg4e64_v_u64m2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
//
-void test_vlsseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_v_u64m2x4_m(mask, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c
index 5d1898a22a3940276941693fb32b3fb1d75d78a0..7415b0f60c630e59ac90ea3cfbedf98727185e9a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c
@@ -6,363 +6,203 @@
#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_i8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_i8mf8(v0, v1, v2, v3, base, bstride, vl);
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_i8mf8x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_i8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_i8mf4(v0, v1, v2, v3, base, bstride, vl);
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_i8mf4x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_i8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_i8mf2(v0, v1, v2, v3, base, bstride, vl);
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_i8mf2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_i8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_i8m1(v0, v1, v2, v3, base, bstride, vl);
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_i8m1x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_i8m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_i8m2(v0, v1, v2, v3, base, bstride, vl);
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_i8m2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_u8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_u8mf8(v0, v1, v2, v3, base, bstride, vl);
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_u8mf8x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_u8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_u8mf4(v0, v1, v2, v3, base, bstride, vl);
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_u8mf4x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_u8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_u8mf2(v0, v1, v2, v3, base, bstride, vl);
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_u8mf2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_u8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_u8m1(v0, v1, v2, v3, base, bstride, vl);
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_u8m1x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_u8m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_u8m2(v0, v1, v2, v3, base, bstride, vl);
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_u8m2x4(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_i8mf8x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_i8mf8x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_i8mf4x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
//
-void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e8_v_i8mf4x4_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_i8mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr 
[[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c index 0ecae0b82edb97446606f701ed15adca4886fb23..bafd3787848f7cca0130f05d3eefa26eaef0ed4f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c @@ -7,363 +7,183 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f16.i64( poison, 
poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4(v0, v1, v2, v3, v4, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5 +// CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1(v0, v1, v2, v3, v4, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4(v0, v1, v2, v3, v4, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf4x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef 
[[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1(v0, v1, v2, v3, v4, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5_m(mask, base, 
bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t 
*v4, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf4x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, 
poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16m1x5_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c index 079373ece334f3a68b86865083e3c7b0ece9fbe5..41b5a87c38d01222bcc4316f762fee0ec43575fe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c @@ -7,243 +7,123 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5(const float *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2(v0, v1, v2, v3, 
v4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1(v0, v1, v2, v3, v4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1(v0, v1, v2, v3, v4, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } 
[[TMP0]] // -void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c index 0319cc582dd0a6161033b2f6727612e6be169fda..1e0a16bd5c607bd07ecac87c6d7b6955a4677f50 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c @@ -7,123 +7,63 @@ #include <riscv_vector.h> -//
CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1(v0, v1, v2, v3, v4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5(const int64_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_u64m1(v0, v1, v2, v3, v4, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_u64m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_u64m1x5_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c index 7284e6d3ae09f55c5a03bad38dd68cde5d600b87..4f1546edb3d364bf746dda26daee9e539d74bcf5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c @@ -6,323 +6,163 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8(v0, v1, v2, v3, v4, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( poison, poison, poison,
poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4(v0, v1, v2, v3, v4, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1(v0, v1, v2, v3, v4, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8(v0, v1, v2, v3, v4, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf4(v0, v1, v2, v3, v4, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8m1(v0, v1, v2, v3, v4, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local 
void @test_vlsseg5e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8_m(v0, 
v1, v2, v3, v4, mask, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret 
void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c index 3a0944ba9cd0a0e4f503c5a45168f6cf9c91172d..92c4b670c8c67ba2fc1c447b2e445eb58bdf6d27 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c @@ -7,399 +7,183 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , }
@llvm.riscv.vlsseg6.nxv1f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1 -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // 
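// The updated tests above reflect the new intrinsic shape: instead of
// writing NF results through output pointers and returning void, each
// strided segment load now returns a single tuple value. A minimal
// caller sketch for the unmasked form -- the load signature comes from
// the tests above, while the __riscv_vget accessor spelling is an
// assumption based on the v0.12 tuple-type API:
//
//   vfloat16mf4x6_t t = __riscv_vlsseg6e16_v_f16mf4x6(base, bstride, vl);
//   vfloat16mf4_t f0 = __riscv_vget_v_f16mf4x6_f16mf4(t, 0); // first segment field
//   vfloat16mf4_t f5 = __riscv_vget_v_f16mf4x6_f16mf4(t, 5); // last of the six fields
//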
-void test_vlsseg6e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 
3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_m 
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) 
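// The masked-call line above shows the corresponding masked form: the
// mask stays the leading C argument, the output pointers are gone, and
// the result comes back as a tuple; the trailing "i64 3" in the IR is
// the policy operand (tail- and mask-agnostic). A sketch using the
// signature these tests check, with `mask`, `base`, `bstride`, and `vl`
// assumed to be in scope:
//
//   vuint16m1x6_t t = __riscv_vlsseg6e16_v_u16m1x6_m(mask, base, bstride, vl);
//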
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c index c87be4c342329cf71d35851613cc4ffb19236d3f..836341ef0d357c68808d50d29e4658500555dc88 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c @@ -7,267 +7,123 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, 
vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( poison, poison, poison, poison, poison, 
poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef 
[[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// 
CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef 
[[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c index fedb3f3bed957396836ea4eb4c618a5b3f75b4e7..ce97076f85beffb79048a1ef631304a171105416 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c @@ -7,135 +7,63 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , 
, , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_f64m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_i64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_i64m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_u64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_u64m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg6e64_v_f64m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_i64m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_u64m1x6_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c index 6663d0852909ebc4c69ebd511bd116d067f7d6b4..cec568fd0b1d9e109728266a70f70c00f5b2535b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c @@ -6,355 +6,163 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf8x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf4x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf2(v0, v1, 
v2, v3, v4, v5, base, bstride, vl); +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint8m1x6_t test_vlsseg6e8_v_i8m1x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], 
align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf8x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf4x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf8x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf4_m(v0, v1, 
v2, v3, v4, v5, mask, base, bstride, vl); +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf4x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf8x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf4x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_m(vbool16_t 
mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8m1x6_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c index 41992e6e69189831092ac0e3cd8c1ef0e66e65b9..e227bf671bbeff65eb30bd03ef157e62c7f5546b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c @@ -7,435 +7,183 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); 
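
At this point the hunk swaps the f16mf2 test from the pointer-out form (the deleted lines above) to the tuple-returning form (the added lines below). In caller terms the migration looks like the following sketch; `__riscv_vget_v_f16mf2x7_f16mf2` is the tuple accessor assumed from the matching revision of the RVV intrinsics spec, not something added by this patch:

#include <riscv_vector.h>

/* Before this patch each segment field came back through an output
 * pointer (v0..v6); after it, the intrinsic returns a single tuple
 * value and fields are read out with the vget accessors. */
vfloat16mf2_t first_field(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  vfloat16mf2x7_t seg = __riscv_vlsseg7e16_v_f16mf2x7(base, bstride, vl);
  return __riscv_vget_v_f16mf2x7_f16mf2(seg, 0); /* field 0 of the 7-field segment */
}
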
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16mf2x7(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg7e16_v_f16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg7.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]],
align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef 
[[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint16m1x7_t test_vlsseg7e16_v_i16m1x7(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlsseg7.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, 
vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t 
mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf4_m(v0, 
v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c index ac3eb27c9152f6794e4cd233a05acfa56922a1a2..74ffc5240ab9a6d05db90eba66e0470ed0f8cd87 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c @@ -7,291 +7,123 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], 
i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, 
vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], 
align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32m1x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], 
align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32m1x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr 
noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: 
store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e32_v_u32m1x7_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c
index 3ca3fa4512888208521ac49838a390fe59154401..a1b8f184c049eb06127f14e1db0da497758504a5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c
@@ -7,147 +7,63 @@
 #include 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7(const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e64_v_f64m1x7(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vint64m1x7_t test_vlsseg7e64_v_i64m1x7(const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e64_v_i64m1x7(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e64_v_u64m1x7(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e64_v_f64m1x7_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint64m1x7_t test_vlsseg7e64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e64_v_i64m1x7_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e64_v_u64m1x7_m(mask, base, bstride, vl);
 }
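The pattern these tests encode: the old strided-segment-load intrinsics wrote their NF results through output pointers, while the tuple forms return a single value. A minimal before/after sketch in C (the __riscv_vget_v_f64m1x7_f64m1 accessor is assumed from the tuple-type revision of the RVV intrinsics spec; it is not part of this patch):

    #include <riscv_vector.h>

    vfloat64m1_t first_field(const double *base, ptrdiff_t bstride, size_t vl) {
      // Old API (removed above) filled seven output pointers:
      //   __riscv_vlsseg7e64_v_f64m1(&f0, &f1, &f2, &f3, &f4, &f5, &f6,
      //                              base, bstride, vl);
      // New API: one call yields a 7-element tuple value.
      vfloat64m1x7_t seg = __riscv_vlsseg7e64_v_f64m1x7(base, bstride, vl);
      // Fields are then read with the (assumed) tuple accessor.
      return __riscv_vget_v_f64m1x7_f64m1(seg, 0);
    }

The same shape applies to every element width and LMUL below; only the type suffixes change.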
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c
index 45cd9cec2fd355810fc42eeb4d492fa4d4a3c9a2..1bdbe07672e7bc08b94e96e62bee6994fe87d1e8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c
@@ -6,387 +6,163 @@
 #include 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_i8mf8x7(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_i8mf4x7(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_i8mf2x7(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_i8m1x7(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_u8mf8x7(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_u8mf4x7(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_u8mf2x7(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_u8m1x7(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_i8mf8x7_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_i8mf4x7_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_i8mf2x7_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_i8m1x7_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_u8mf8x7_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_u8mf4x7_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_u8mf2x7_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]]
 //
-void test_vlsseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_u8m1x7_m(mask, base, bstride, vl);
 }
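The masked variants above keep the same shape: the vbool mask moves to the front of the argument list, ahead of the base pointer, and the tuple is still returned by value; the trailing i64 3 policy operand in the checked IR marks these forms as tail- and mask-agnostic. A short usage sketch matching the u8m1 case (a hedged illustration, not code from the patch):

    #include <riscv_vector.h>

    vuint8m1x7_t masked_seg7(vbool8_t mask, const uint8_t *base,
                             ptrdiff_t bstride, size_t vl) {
      // Mask-first signature of the tuple form; masked-off and tail
      // elements are agnostic (the i64 3 policy operand in the IR above).
      return __riscv_vlsseg7e8_v_u8m1x7_m(mask, base, bstride, vl);
    }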
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c
index 53718c7d5935a929f27eb932180d7f14167f1a21..9f21e74fff690a5d83ebafbf9adfff302f32898c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c
@@ -7,471 +7,183 @@
 #include 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e16_v_f16mf4x8(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e16_v_f16mf2x8(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e16_v_f16m1x8(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e16_v_i16mf4x8(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e16_v_i16mf2x8(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vint16m1x8_t test_vlsseg8e16_v_i16m1x8(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e16_v_i16m1x8(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e16_v_u16mf4x8(base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-//
CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef 
[[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t 
*v7, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], 
align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c index bbd02c8eb54e80bee4b5bf6640a87e4349dbaf1d..5465dc60df6dea7ded02d0c5c541d1aef346f437 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c @@ -7,315 +7,123 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr 
[[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr 
noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg8e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, 
vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e32_v_f32m1x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e32_v_i32mf2x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vint32m1x8_t test_vlsseg8e32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e32_v_i32m1x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e32_v_u32mf2x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e32_v_u32m1x8_m(mask, base, bstride, vl);
 }
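The functional change running through all of these autogenerated tests is the same: the strided segment loads no longer write each segment through an output pointer and instead return a single tuple value. A minimal sketch of what callers see, assuming the matching tuple accessor __riscv_vget_v_f32m1x8_f32m1 from the same intrinsics revision is available (the helper name first_field is illustrative):

#include <stddef.h>
#include <riscv_vector.h>

// Before: eight out-parameters, one per segment field:
//   vfloat32m1_t v0, ..., v7;
//   __riscv_vlsseg8e32_v_f32m1_m(&v0, ..., &v7, mask, base, bstride, vl);
// After: one tuple return; fields are extracted on demand.
vfloat32m1_t first_field(vbool32_t mask, const float *base,
                         ptrdiff_t bstride, size_t vl) {
  vfloat32m1x8_t seg = __riscv_vlsseg8e32_v_f32m1x8_m(mask, base, bstride, vl);
  return __riscv_vget_v_f32m1x8_f32m1(seg, 0); // segment field 0
}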
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c
index a9419e626947e05c59f0e79033a3a3836d012c98..c6bdb8d13d99811e6a4bad36654ac8f4b7b0258e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c
@@ -7,159 +7,63 @@

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8(const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_v_f64m1x8(base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vint64m1x8_t test_vlsseg8e64_v_i64m1x8(const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_v_i64m1x8(base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vuint64m1x8_t test_vlsseg8e64_v_u64m1x8(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_v_u64m1x8(base, bstride, vl);
 }
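Worth keeping in mind while reading these expectations: vlsseg8e64 loads one eight-field record per active element, with field f of element i taken from base + i*bstride + f*8, so bstride is the byte distance between consecutive records. A hedged sketch against the new unmasked intrinsic (the Record layout is illustrative):

#include <stddef.h>
#include <riscv_vector.h>

typedef struct { double f[8]; } Record; // one 64-byte record, 8 fields

vfloat64m1x8_t load_records(const Record *recs, size_t vl) {
  // Contiguous records: byte stride between segment starts = sizeof(Record).
  return __riscv_vlsseg8e64_v_f64m1x8(&recs->f[0], (ptrdiff_t)sizeof(Record), vl);
}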
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_v_f64m1x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vint64m1x8_t test_vlsseg8e64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_v_i64m1x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_v_u64m1x8_m(mask, base, bstride, vl);
 }
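In the masked CHECK lines above, the trailing i64 3 operand is the policy word the builtin passes to the IR intrinsic: bit 0 requests a tail-agnostic result and bit 1 a mask-agnostic one, so 3 encodes TAMA, the documented default for the plain _m intrinsics; the all-poison passthru operands are the other half of that contract. A sketch of the correspondence (bit assignments per the RVV intrinsics policy encoding; policy-suffixed variants pass other constants and take a real passthru tuple):

#include <stddef.h>
#include <riscv_vector.h>

// Plain _m form: no passthru argument, lowers with policy operand i64 3 (TAMA).
vfloat64m1x8_t tama(vbool64_t m, const double *p, ptrdiff_t s, size_t vl) {
  return __riscv_vlsseg8e64_v_f64m1x8_m(m, p, s, vl);
}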
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c
index 7fa3492d12c408a5d62527806e999e1556a1a8a3..7afb166ffcae21a6e486a5e753d1d1d78d1ec401 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c
@@ -6,419 +6,163 @@

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_i8mf8x8(base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_i8mf4x8(base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_i8mf2x8(base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vint8m1x8_t test_vlsseg8e8_v_i8m1x8(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_i8m1x8(base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_u8mf8x8(base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_u8mf4x8(base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_u8mf2x8(base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_u8m1x8(base, bstride, vl);
 }
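The masked variants that follow pair each data type with the mask type determined by the ratio SEW/LMUL: at SEW=8, mf8 takes vbool64_t, mf4 takes vbool32_t, mf2 takes vbool16_t, and m1 takes vbool8_t. A small usage sketch for the m1 case (the even-element mask construction is illustrative):

#include <stddef.h>
#include <riscv_vector.h>

vint8m1x8_t load_even_records(const int8_t *base, ptrdiff_t bstride, size_t vl) {
  vuint8m1_t idx  = __riscv_vid_v_u8m1(vl);               // 0, 1, 2, ...
  vuint8m1_t lsb  = __riscv_vand_vx_u8m1(idx, 1, vl);     // parity of each index
  vbool8_t   even = __riscv_vmseq_vx_u8m1_b8(lsb, 0, vl); // select even lanes
  return __riscv_vlsseg8e8_v_i8m1x8_m(even, base, bstride, vl);
}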
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_i8mf8x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_i8mf4x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_i8mf2x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vint8m1x8_t test_vlsseg8e8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_i8m1x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_u8mf8x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_u8mf4x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_u8mf2x8_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]]
 //
-void test_vlsseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e8_v_u8m1x8_m(mask, base, bstride, vl);
 }
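The overloaded tests below exercise the short names: for masked loads the mask and pointer argument types are enough to select an instance, so __riscv_vlsseg2e16 called with a vbool64_t mask and a const _Float16 * base resolves to the f16mf4x2 variant, exactly as in the first test that follows. Sketch:

#include <stddef.h>
#include <riscv_vector.h>

vfloat16mf4x2_t pairs(vbool64_t m, const _Float16 *p, ptrdiff_t s, size_t vl) {
  return __riscv_vlsseg2e16(m, p, s, vl); // same call, suffix inferred
}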
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m2x2_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_m(vbool64_t mask, const 
int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + 
return __riscv_vlsseg2e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_u16m4x2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]]
 //
-void test_vlsseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16(mask, base, bstride, vl);
 }
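For readers skimming these autogenerated hunks, the user-visible change is the same in every file: the masked strided segment loads no longer write their results through output pointers but return one tuple value. A minimal sketch of the migration, using the u16m4 variant from the hunk above; the `__riscv_vget_v_u16m4x2_u16m4` accessor is assumed from the tuple-type support in the same intrinsics model and does not itself appear in this patch:

    #include <riscv_vector.h>

    void before(const uint16_t *base, ptrdiff_t stride, vbool4_t mask, size_t vl) {
      vuint16m4_t v0, v1;
      // Removed API: two output pointers, then the mask.
      __riscv_vlsseg2e16_v_u16m4_m(&v0, &v1, mask, base, stride, vl);
      (void)v0; (void)v1;
    }

    void after(const uint16_t *base, ptrdiff_t stride, vbool4_t mask, size_t vl) {
      // New API: the load returns both segment fields as one tuple.
      vuint16m4x2_t vt = __riscv_vlsseg2e16_v_u16m4x2_m(mask, base, stride, vl);
      // Assumed tuple accessor; extracts field 0 and field 1.
      vuint16m4_t v0 = __riscv_vget_v_u16m4x2_u16m4(vt, 0);
      vuint16m4_t v1 = __riscv_vget_v_u16m4x2_u16m4(vt, 1);
      (void)v0; (void)v1;
    }

Returning the aggregate also lets the IR keep the result in registers, which is why every hunk drops the extractvalue/store sequence in favor of a single ret.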
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c
index f404a2b01e9af8c79c321868908171773fe6aad2..eafc8ebb8fe5567a0eb6e807a0a4c434cfccb604 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c
@@ -7,171 +7,123 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg2e32_v_f32mf2x2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
 //
-void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
+vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg2e32_v_f32m1x2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]]
 //
-void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
+vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg2e32_v_f32m2x2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]]
 //
-void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
+vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x float>, <vscale x 8 x float> } @test_vlsseg2e32_v_f32m4x2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]]
 //
-void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
+vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_m(vbool8_t mask, const float *base,
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32(mask, 
base, bstride, vl);
 }
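The file deleted below existed only because the tuple-returning overloads carried a `_tuple` suffix while the pointer-based forms owned the plain names. With the pointer forms removed, the plain overloaded name now resolves to the tuple form, so the suffixed spelling and its dedicated test become redundant. A minimal before/after sketch under that reading, using only signatures that appear in this diff:

    #include <riscv_vector.h>

    vfloat32mf2x2_t load_pair(vbool64_t mask, const float *base,
                              ptrdiff_t bstride, size_t vl) {
      // Previously the tuple result required the suffixed overload:
      //   return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
      // Now the unsuffixed overload returns the tuple directly.
      return __riscv_vlsseg2e32(mask, base, bstride, vl);
    }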
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32_tuple.c
deleted file mode 100644
index de35620d09471e3ddc933719e8f5b5236bf99ef3..0000000000000000000000000000000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32_tuple.c
+++ /dev/null
@@ -1,128 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
-// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
-// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg2e32_v_f32mf2x2_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
-//
-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg2e32_v_f32m1x2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]]
-//
-vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg2e32_v_f32m2x2_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]]
-//
-vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x float>, <vscale x 8 x float> } @test_vlsseg2e32_v_f32m4x2_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]]
-//
-vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_m(vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_i32mf2x2_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
-//
-vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_i32m1x2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
-//
-vint32m1x2_t test_vlsseg2e32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_i32m2x2_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]]
-//
-vint32m2x2_t test_vlsseg2e32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_i32m4x2_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]]
-//
-vint32m4x2_t test_vlsseg2e32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_u32mf2x2_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
-//
-vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_u32m1x2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
-//
-vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_u32m2x2_m
-// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl); -} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c index 18a174cc98fd76f1a1a11efc9bb58515c444a339..847dc916b00ccc37fbf8aa4919ec01e1d889c121 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c @@ -7,129 +7,93 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( 
poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_m(vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t 
*base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_u64m2x2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
 //
-void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]]
 //
-void test_vlsseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64(mask, base, bstride, vl);
 }
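One detail worth spelling out, since the e8 file below exercises it across every fractional LMUL: the overloaded masked form deduces its full return type from the scalar arguments alone. The element width comes from the pointee type of `base`, and the LMUL falls out of the mask type, because each `vboolN_t` fixes the SEW/LMUL ratio N. A sketch using only call shapes that appear in these tests:

    #include <riscv_vector.h>

    void deduce(const int8_t *base, ptrdiff_t bstride, size_t vl,
                vbool64_t m64, vbool16_t m16) {
      // int8_t element (SEW=8) + vbool64_t (ratio 64) -> LMUL = 1/8.
      vint8mf8x2_t a = __riscv_vlsseg2e8(m64, base, bstride, vl);
      // int8_t element (SEW=8) + vbool16_t (ratio 16) -> LMUL = 1/2.
      vint8mf2x2_t b = __riscv_vlsseg2e8(m16, base, bstride, vl);
      (void)a; (void)b;
    }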
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c
index bf1ef87fcd7624cd0680153177aff004f22d7c55..1d6ad0ec9f7c0438701f16dabb8e3e70831c4c6a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c
@@ -6,171 +6,123 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg2e8_v_i8mf8x2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg2e8_v_i8mf4x2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg2e8_v_i8mf2x2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg2.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t
*base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , 
} [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } 
@test_vlsseg2e8_v_u8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, 
+  return __riscv_vlsseg2e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c
index 9709051ba5d01cc9e55b33afe1e9e3aa657e3c3f..7e4d01a8ef68ddd0323cc74e0bed5a1cacf91170 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c
@@ -7,195 +7,123 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c
index 89caea1be9a30b77679f2eb41f25d1b7760cfa35..c34b3539c856c694d33e6cbcbad118cdafc8d30d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c
@@ -7,147 +7,93 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c
index 0ecc6ba8dcb3d90e540b38b2f005158aab6c0062..8e1c2c3c51f87f906b47685d3a10c8e3421db389 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c
@@ -7,99 +7,63 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c
index 2a829be4d5295ca29418c7747cbb37ea66c6462b..a33b1336029db73f6b74969e63809f1fb0cf53fc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c
@@ -6,163 +6,103 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { , , } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64
noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_u8m2x3_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
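(Usage sketch, not part of the autogenerated diff.) After this change the segment fields come back as a single tuple value instead of being written through the v0..v2 out-pointers; a field is then extracted with __riscv_vget_v_* from the same tuple-type intrinsics API this patch targets. The function and buffer names below are illustrative assumptions, not taken from the patch:

#include <riscv_vector.h>
// Split interleaved RGB bytes: load 3-field segments at byte stride 3,
// then pull the R field out of the returned tuple.
void rgb_r_channel(vbool8_t m, const uint8_t *rgb, uint8_t *r_out, size_t n) {
  size_t vl = __riscv_vsetvl_e8m1(n);
  vuint8m1x3_t px = __riscv_vlsseg3e8(m, rgb, 3, vl);  // masked tuple load
  vuint8m1_t r = __riscv_vget_v_u8m1x3_u8m1(px, 0);    // field 0 of 3
  __riscv_vse8_v_u8m1(r_out, r, vl);                   // store the R plane
}

Masked-off lanes of px are undefined under this non-policy form, so a real caller would store them back only under the same mask (e.g. with __riscv_vse8_v_u8m1_m).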

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c
index 389dfd33b2080634550c74173be11466b5583f88..fdadc5542ee5f574694f5066925662b06089bb76 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c
@@ -7,219 +7,123 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg4e16_v_f16mf4x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg4e16_v_f16mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg4e16_v_f16m1x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> }
@llvm.riscv.vlsseg4.mask.nxv4f16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c index 5b61ce9f07796ba82fbedc2a7cbef500cde27f15..8b3bf4c41f17c936deff2d6f9e805ee20c630744 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c @@ -7,165 +7,93 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret 
void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]]
 //
-void test_vlsseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32(mask, base, bstride, vl);
 }
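(Migration sketch, not part of the patch.) Call sites move from the removed out-pointer form to the tuple form by dropping the v0..v3 arguments and reading fields from the returned value; the field index passed to __riscv_vget_v_* must be a constant. A minimal before/after under that assumption, with an illustrative function name:

#include <riscv_vector.h>
vfloat32m1_t third_field(vbool32_t m, const float *base, ptrdiff_t stride, size_t vl) {
  // Old form removed by this patch wrote through out-pointers:
  //   __riscv_vlsseg4e32(&f0, &f1, &f2, &f3, m, base, stride, vl);
  vfloat32m1x4_t q = __riscv_vlsseg4e32(m, base, stride, vl);
  return __riscv_vget_v_f32m1x4_f32m1(q, 2);  // field 2 of the 4-field tuple
}

Note that the overload is still fully determined: the segment count and EEW are in the name (vlsseg4e32) and the mask type fixes the LMUL, so the tuple result type follows from the arguments alone.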

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c
index b4c7b0751c544bbbf7b9e712d913fb07efcb6c13..090d5024b3ccd5844f9230c4f76835d618cb5c90 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c
@@ -7,111 +7,63 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg4e64_v_f64m1x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg4.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
 //
-void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg4e64_v_f64m2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg4.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]]
 //
-void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg4e64_v_i64m1x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg4.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg4e64_v_i64m2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] =
extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
 //
-void test_vlsseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg4e64(mask, base, bstride, vl);
 }
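(Usage sketch, not part of the patch.) A typical use of the e64 tuple form is pulling one column out of an array-of-structs, with the struct size as the byte stride. The Point layout and function name here are illustrative assumptions:

#include <riscv_vector.h>
typedef struct { double x, y, z, w; } Point;  // illustrative AoS layout

// Gather the y column from an array of Point, vl elements at a time.
vfloat64m1_t load_y(vbool64_t m, const Point *pts, size_t vl) {
  vfloat64m1x4_t cols =
      __riscv_vlsseg4e64(m, &pts[0].x, (ptrdiff_t)sizeof(Point), vl);
  return __riscv_vget_v_f64m1x4_f64m1(cols, 1);  // field 1 is y
}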
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], 
align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void 
test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c index 04b14f73dd737d3269513afc8a709108c44cf6ca..38c8dd05e491fdde212df07b7c28ca632be5a4f1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c @@ -7,183 +7,93 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_m -// CHECK-RV64-SAME: 
(ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, 
vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c index 6c7047b8a8ba7e804cf4959da68ee78c941f95fb..ce92596934fd674ba9690d4674464ebc67d05a9a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c @@ -7,123 +7,63 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 
1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr 
[[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c index f9ed4171ff9a06cfb80224e845bcaaa379488995..29f5eef1520ac9fd4e36319a40e60f6384115410 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c @@ -7,63 +7,33 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, 
vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c index 6855f25dccef28ea4c8456470aaa0875fa25a04f..026c04e27e575ffb2c7488f0211fdcb0e8b014c0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c @@ -6,163 +6,83 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_m -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); 
+vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c index 838f66a4eb2323e486150e1658c8d835e4b3584e..eed25c65cd0af0e220f4567637fc8807d025155b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c @@ -7,201 +7,93 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, 
vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , 
, , } @test_vlsseg6e16_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c index 0f39d063ebbfd4514b73081784b98f9adf34dc2f..2aa3254194ee7da74da4bdffc8784eaa3876b819 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c @@ -7,135 +7,63 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg6e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: 
store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c index df4d82b78d5fac532ed8078250b1f98e840f3d48..957e4f32bcdc0a4b0ed012f9f60a068f2a6bbfdc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c @@ -7,69 +7,33 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef 
[[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // 
-void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c index 3496c5351ddb4876c3374bc2053a4849cedb668b..a18db969c38bb38a25f28eb9417297795b5d0250 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c @@ -6,179 +6,83 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8mf4x6_t 
test_vlsseg6e8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( poison, poison, 
poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg6e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c index 943a831b3e48b4829174a5c1dbe6d610b2889b09..fa98d8768dc4b1fd22105496d2774431a123e45a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c @@ -7,219 +7,93 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_m(vbool32_t 
mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg7e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef 
[[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c index 2a7f874f57f14655e60d4f855b4023f973c2ad26..c432a18d58477043637ca15f43ca3141e5dea863 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c @@ -7,147 +7,63 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); 
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr 
[[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32(mask, base, bstride, vl); } 
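Aside from the mechanical churn, every hunk in these test files makes the same interface change: the masked strided segment load no longer writes its NF results through output pointers (v0..v6) but returns them as a single tuple value. A minimal usage sketch of the new style (the helper name first_field and the tuple accessor __riscv_vget_v_f32mf2x7_f32mf2 are illustrative assumptions from the v0.12 tuple-type intrinsics, not part of this patch; the __riscv_vlsseg7e32 overload is exactly the one exercised by the tests below):

    #include <riscv_vector.h>

    // Tuple-return style: one masked call loads all seven segment fields.
    vfloat32mf2_t first_field(vbool64_t mask, const float *base,
                              ptrdiff_t bstride, size_t vl) {
      vfloat32mf2x7_t seg = __riscv_vlsseg7e32(mask, base, bstride, vl);
      // Extract field 0 from the tuple (assumed vget-style tuple accessor).
      return __riscv_vget_v_f32mf2x7_f32mf2(seg, 0);
    }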
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_u32m1x7_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg7.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
 //
-void test_vlsseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c
index 1c31f2b2954f29e7103fc9a82ea4d196a0c0bced..6d20dc5db82e9a008479a4e8fef67f59db7d2a2a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c
@@ -7,75 +7,33 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg7e64_v_f64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg7.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
 //
-void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg7e64_v_i64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg7.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint64m1x7_t test_vlsseg7e64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg7e64_v_u64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg7.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c
index a94f1a6bb4602d8acfc3e4f9e5042eaae9b242fa..03745f73308956d4028dd811efc527c875d2514a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c
@@ -6,195 +6,83 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define
dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, 
base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c index 4898927a1c3b1736df8e719cf8404564ba47b375..7d0545a489266b84aae72e7a84ba40ceb4ff1d4b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c @@ -7,237 +7,93 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , 
, , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, 
bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 
3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// 
CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c index c1a793fdcb4b22df75fb64c551fea448f52cfe5f..d559b42ca33040e3d164202173e3f562579f4810 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c @@ -7,159 +7,63 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store 
[[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg8e32(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c index 68f16c3d0c60d558fc3eb36140620277f4431851..14cde6db7d98cb66e8798175a5a87406c9586b48 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c @@ -7,81 +7,33 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } 
@test_vlsseg8e64_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c index e2a020046164f07c7cb3ca332fc8447bf83de6c8..2c835c652623437139818ee0c490808a86c52f80 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c @@ -6,211 +6,83 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , 
, , , , , } [[TMP0]] // -void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 
5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], 
ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - 
return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c
index 9e8e40e41615b49187c15afa759d0a378f596d41..0c2b8e1e78a999000b7d872f2b246039975a816e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c
@@ -7,843 +7,843 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg2e16_v_f16mf4x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.nxv1f16.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf4x2_tu(maskedoff_tuple, base, bstride, vl);
 }
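// Caller-side sketch of the change these generated tests encode: the strided
// segment loads now return a vector tuple instead of writing NF results
// through output pointers, and the _tu variants take the tail-undisturbed
// value as one tuple operand rather than NF separate maskedoff arguments.
// Illustrative only, not part of the autogenerated tests; it assumes the
// tuple accessor __riscv_vget_v_f16mf4x2_f16mf4 and __riscv_vfadd_vv_f16mf4
// from the same intrinsics revision, and a toolchain targeting e.g.
// -march=rv64gcv_zvfh.
#include <riscv_vector.h>

// Sum the two interleaved fields of a strided two-field record stream.
static vfloat16mf4_t sum_pair_fields(const _Float16 *base, ptrdiff_t bstride,
                                     size_t vl) {
  // One segment load yields both fields as a single tuple value.
  vfloat16mf4x2_t seg = __riscv_vlsseg2e16_v_f16mf4x2(base, bstride, vl);
  vfloat16mf4_t f0 = __riscv_vget_v_f16mf4x2_f16mf4(seg, 0);
  vfloat16mf4_t f1 = __riscv_vget_v_f16mf4x2_f16mf4(seg, 1);
  return __riscv_vfadd_vv_f16mf4(f0, f1, vl);
}
// Returning the tuple by value lets the compiler keep the segment register
// group live instead of forcing a round trip through memory, which is what
// the removed extractvalue/store sequences above used to do.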
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg2e16_v_f16mf2x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.nxv2f16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf2x2_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.nxv4f16.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m1x2_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg2e16_v_f16m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half>
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m4x2_tu(maskedoff_tuple, 
base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16mf4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf2_tu(vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16mf2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 
1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vlsseg2e16_v_u16m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void 
test_vlsseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, 
ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_i16mf2x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_i16m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m1x2_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_i16m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_i16m4x2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0]], <vscale x 16 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_u16mf4x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_u16mf2x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_u16m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m1x2_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_u16m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_u16m4x2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0]], <vscale x 16 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
}
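The hunks above migrate the _tum tests from the out-pointer API (v0/v1) to a tuple-returning API. A minimal usage sketch of the new shape, assuming riscv_vector.h and the tuple accessor __riscv_vget_v_i16m1x2_i16m1 from the same intrinsics revision; demo_tum is an illustrative name, not part of this patch:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Tail-undisturbed (_tum) strided segment-2 load: tail elements are taken from
// the maskedoff tuple, and both fields come back in one vint16m1x2_t value.
static vint16m1_t demo_tum(vbool16_t mask, vint16m1x2_t maskedoff,
                           const int16_t *base, ptrdiff_t bstride, size_t vl) {
  vint16m1x2_t seg =
      __riscv_vlsseg2e16_v_i16m1x2_tum(mask, maskedoff, base, bstride, vl);
  // Assumed tuple accessor: extract field 0 of the x2 tuple.
  return __riscv_vget_v_i16m1x2_i16m1(seg, 0);
}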
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg2e16_v_f16mf4x2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg2e16_v_f16mf2x2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg2e16_v_f16m2x2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x half>, <vscale x 16 x half> } @test_vlsseg2e16_v_f16m4x2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0]], <vscale x 16 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } poison, <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[TMP2]], <vscale x 16 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_i16mf4x2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
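Across all of these hunks the only difference between the _tum, _tumu, and _mu groups is the trailing policy constant passed to the masked IR intrinsic: i64 2 for _tum, i64 0 for _tumu, and i64 1 for _mu, consistent with the RVV_VTA/RVV_VMA policy flags used in riscv_vector.td (bit 0 = tail agnostic, bit 1 = mask agnostic). A sketch of the _tumu case, where both tail and inactive elements are preserved from the passthru tuple; keep_all is an illustrative name, not part of this patch:

#include <riscv_vector.h>
#include <stddef.h>

// _tumu: VTA=0 and VMA=0, hence the trailing "i64 0" in the checks above;
// every element the load does not write is kept from the passthru tuple.
static vfloat16m1x2_t keep_all(vbool16_t mask, vfloat16m1x2_t passthru,
                               const _Float16 *base, ptrdiff_t bstride,
                               size_t vl) {
  return __riscv_vlsseg2e16_v_f16m1x2_tumu(mask, passthru, base, bstride, vl);
}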
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_i16mf2x2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_i16m1x2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_i16m2x2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_i16m4x2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0]], <vscale x 16 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_u16mf4x2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_u16mf2x2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_u16m1x2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_u16m2x2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_u16m4x2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0]], <vscale x 16 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
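The remaining hunks cover the _mu group, where only masked-off element positions are preserved and the tail is agnostic (the trailing i64 1 in the calls below). A sketch of preparing the passthru tuple first, assuming the tuple updater __riscv_vset_v_u16m1_u16m1x2 with a (dest, index, value) signature from the same intrinsics revision; demo_mu and fill are illustrative names:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// _mu: inactive (masked-off) elements of both tuple fields come from passthru.
static vuint16m1x2_t demo_mu(vbool16_t mask, vuint16m1x2_t passthru,
                             vuint16m1_t fill, const uint16_t *base,
                             ptrdiff_t bstride, size_t vl) {
  // Assumed tuple updater: overwrite field 0 of the passthru tuple.
  passthru = __riscv_vset_v_u16m1_u16m1x2(passthru, 0, fill);
  return __riscv_vlsseg2e16_v_u16m1x2_mu(mask, passthru, base, bstride, vl);
}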
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg2e16_v_f16mf4x2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg2e16_v_f16mf2x2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m1x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg2e16_v_f16m2x2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x half>, <vscale x 16 x half> } @test_vlsseg2e16_v_f16m4x2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0]], <vscale x 16 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } poison, <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[TMP2]], <vscale x 16 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_i16mf4x2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_i16mf2x2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_i16m1x2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m1x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_i16m2x2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_i16m4x2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0]], <vscale x 16 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_u16mf4x2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_u16mf2x2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]],
ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const 
uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c index cce5918ebcc8926d2fe88d8410d4801d2a700a6c..34527c1889b1e42642c5003e1765bc6104034a4f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c @@ -7,675 +7,675 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr 
[[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { 
, } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } 
@test_vlsseg2e32_v_f32mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg2e32_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t 
bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr 
noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } 
poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t 
test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], 
i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32_tuple.c deleted file mode 100644 index cef14f42f1396090664c0122e8909df4c46e849f..0000000000000000000000000000000000000000 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32_tuple.c +++ /dev/null @@ -1,681 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 -// REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ -// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ -// RUN: FileCheck --check-prefix=CHECK-RV64 %s - -#include - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32mf2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m1x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m4x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32mf2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t 
test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m1x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m4x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// 
-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tum -// CHECK-RV64-SAME: 
( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue 
{ , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define 
dso_local { , } @test_vlsseg2e32_v_f32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } 
poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } 
@llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg2e32_v_tuple_u32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } 
[[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c index e261ab667af9e48f1cffc9283b95fc00de09910e..83995551b497b5b6cac8af8c4f205303b396893d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c @@ -7,507 +7,507 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_tu +// CHECK-RV64-SAME: ( 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2_tu(maskedoff_tuple, base, bstride, vl); 
} -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, 
vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_i64m4x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_u64m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.nxv1i64.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m1x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_u64m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m2x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m4x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg2e64_v_f64m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP2]], <vscale x 1 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_f64m1x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg2e64_v_f64m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0]], <vscale x 2 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_f64m2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x double>, <vscale x 4 x double> } @test_vlsseg2e64_v_f64m4x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0]], <vscale x 4 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } poison, <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[TMP2]], <vscale x 4 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_f64m4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_i64m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_i64m1x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_i64m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_i64m2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_i64m4x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_i64m4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_u64m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m1x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_u64m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg2e64_v_i64m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr 
noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void 
test_vlsseg2e64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } 
@test_vlsseg2e64_v_u64m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m2_mu(v0, v1, 
mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c
index cfc48d3e308eb9f80587a5ae2575a2fbb304135d..9bf97d31a5a1672d2a2670e08c0603e989bf6568 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c
@@ -1,680 +1,681 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg2e8_v_i8mf8x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.nxv1i8.i64(<vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf8x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg2e8_v_i8mf4x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.nxv2i8.i64(<vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf4x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg2e8_v_i8mf2x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg2.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg2.nxv4i8.i64(<vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf2x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg2e8_v_i8m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg2.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg2.nxv8i8.i64(<vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// 
CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , 
} [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf8x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg2e8_v_u8m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, 
vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf8x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) 
{ + return __riscv_vlsseg2e8_v_i8m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf8x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8mf8x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m1x2_t test_vlsseg2e8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m1x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m4x2_t test_vlsseg2e8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf8x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m1x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
//
-void test_vlsseg2e8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c
index b983dde860fce6c6aedc28ce26e05221fa91d656..909008b50ed4ce796d484c1ea8217baeb75b0eaa 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c
@@ -7,771 +7,771 @@
#include
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16mf4x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16mf2x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16m1x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16m2x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16mf4x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16mf2x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16m1x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16m2x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e16_v_u16mf4x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e16_v_u16mf2x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue
{ , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t 
maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m1x3_tum(mask, 
maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], 
i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf2_tumu(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg3e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_tumu(vuint16mf4_t 
*v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_tumu +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf4x3_t 
test_vlsseg3e16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue 
{ , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], 
[[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { 
, , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c index 8f493beef435afae22dfa2d3126260d3a615649f..84fd6c6019959e5d0134452a2ca844a494b3d4d2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c @@ -7,579 +7,579 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32mf2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , 
, } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32mf2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e32_v_i32m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
<vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.nxv4i32.i64(<vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP6]]
//
-void test_vlsseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e32_v_i32m2x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg3e32_v_u32mf2x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg3.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], <vscale x 1 x i32> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg3.nxv1i32.i64(<vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]]
//
-void test_vlsseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32mf2x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg3e32_v_u32m1x3_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg3.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg3.nxv2i32.i64(<vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]]
//
-void test_vlsseg3e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32m1x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_u32m2x3_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], <vscale x 4 x i32> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.nxv4i32.i64(<vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP6]]
//
-void test_vlsseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32m2x3_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg3e32_v_f32mf2x3_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[TMP3]], <vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]]
//
-void test_vlsseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32mf2x3_t 
test_vlsseg3e32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } 
[[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, 
const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m2x3_tumu(mask, 
maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , 
} poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// 
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]]
//
-void test_vlsseg3e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e32_v_i32mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg3e32_v_i32m1x3_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg3.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg3.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]]
//
-void test_vlsseg3e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e32_v_i32m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_i32m2x3_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], <vscale x 4 x i32> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP6]]
//
-void test_vlsseg3e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e32_v_i32m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg3e32_v_u32mf2x3_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], <vscale x 1 x i32> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]]
//
-void test_vlsseg3e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg3e32_v_u32m1x3_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg3.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg3.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]]
//
-void test_vlsseg3e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_u32m2x3_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], <vscale x 4 x i32> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP6]]
//
-void test_vlsseg3e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c
index 3a7b84fe6a95ecb79f0dc97a454eed7dafac5002..a6d494f8094e5170d92bf599fe302a54b9d82ec0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c
@@ -7,387 +7,387 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void
@test_vlsseg3e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t 
test_vlsseg3e64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 
8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, 
vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } 
[[TMP6]] // -void test_vlsseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e64_v_i64m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t 
test_vlsseg3e64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg3.mask.nxv2f64.i64(<vscale x 2 x double> [[TMP3]], <vscale x 2 x double> [[TMP4]], <vscale x 2 x double> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP6]]
 //
-void test_vlsseg3e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg3e64_v_i64m1x3_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg3e64_v_i64m2x3_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], <vscale x 2 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg3e64_v_u64m1x3_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg3e64_v_u64m2x3_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], <vscale x 2 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c
index 769224db7d7088a56018993089baea4a1dfdf898..f8a6ae28dd83ee20591d7e95fa3461936435369e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c
@@ -1,648 +1,649 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_i8mf8x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.nxv1i8.i64(<vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf8x3_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_i8mf4x3_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t 
*base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 
[[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf8x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void 
test_vlsseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf4x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf8x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t 
test_vlsseg3e8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf8x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]]
//
-void test_vlsseg3e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf8x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_i8mf4x3_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]]
//
-void test_vlsseg3e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr
noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] 
= extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf8x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c index 9afccf0a810e2c3f7e6669bd32e8e573820fd067..502fb539dc2f00a070f0ca7e3f9b25927ba620bb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c @@ -7,867 +7,867 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
[Ten further _tu deletions follow the same pattern: test_vlsseg4e16_v_{f16m1,f16m2,i16mf4,i16mf2,i16m1,i16m2,u16mf4,u16mf2,u16m1,u16m2}_tu, calling @llvm.riscv.vlsseg4.nxv{4,8}f16.i64 and @llvm.riscv.vlsseg4.nxv{1,2,4,8}i16.i64 respectively.]
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
[Eleven further _tum deletions follow for the remaining f16/i16/u16 variants (mf2, m1, m2; masks <vscale x {2,4,8} x i1>), each calling @llvm.riscv.vlsseg4.mask.* with policy operand i64 2.]
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
[Eleven further _tumu deletions follow, identical in shape except for policy operand i64 0.]
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
[Eleven further _mu deletions follow (policy operand i64 1), ending with test_vlsseg4e16_v_u16m2_mu. The replacement tuple-based tests begin here:]
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg4e16_v_f16mf4x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>,
, , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf4x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 
3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.nxv4f16.i64(<vscale x 4 x half> [[TMP4]], <vscale x 4 x half> [[TMP5]], <vscale x 4 x half> [[TMP6]], <vscale x 4 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP8]]
+//
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16m1x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg4e16_v_f16m2x4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.nxv8f16.i64(<vscale x 8 x half> [[TMP4]], <vscale x 8 x half> [[TMP5]], <vscale x 8 x half> [[TMP6]], <vscale x 8 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP8]]
+//
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16m2x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg4e16_v_i16mf4x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.nxv1i16.i64(<vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP8]]
+//
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16mf4x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_i16mf2x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m1x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf4x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m1x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_tum +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], <vscale x 8 x i16> [[TMP6]], <vscale x 8 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP8]]
+//
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg4e16_v_u16mf4x4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP8]]
+//
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_u16mf2x4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } 
@llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4_tumu +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } 
@test_vlsseg4e16_v_i16m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } 
[[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , 
} [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } 
[[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
{ , , , } @test_vlsseg4e16_v_u16mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c index bb70f55940791a1fe56de5b170e85716ac6d690d..50102f7493bc378a026deb1377b6979786f87199 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c @@ -7,651 +7,651 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], 
ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
} [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
-// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlsseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t 
*v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_mu -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , 
, , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg4e32_v_f32mf2x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg4.nxv1f32.i64(<vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], <vscale x 1 x float> [[TMP6]], <vscale x 1 x float> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP8]]
+//
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32mf2x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg4e32_v_f32m1x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg4.nxv2f32.i64(<vscale x 2 x float> [[TMP4]], <vscale x 2 x float> [[TMP5]], <vscale x 2 x float> [[TMP6]], <vscale x 2 x float> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP8]]
+//
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m1x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg4e32_v_f32m2x4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP2]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg4.nxv4f32.i64(<vscale x 4 x float> [[TMP4]], <vscale x 4 x float> [[TMP5]], <vscale x 4 x float> [[TMP6]], <vscale x 4 x float> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP8]]
+//
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m2x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg4e32_v_i32mf2x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.nxv1i32.i64(<vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP8]]
+//
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32mf2x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg4e32_v_i32m1x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.nxv2i32.i64(<vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP8]]
+//
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m1x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg4e32_v_i32m2x4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.nxv4i32.i64(<vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], <vscale x 4 x i32> [[TMP6]], <vscale x 4 x i32> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP8]]
+//
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m2x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg4e32_v_u32mf2x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.nxv1i32.i64(<vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP8]]
+//
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32mf2x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg4e32_v_u32m1x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.nxv2i32.i64(<vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP8]]
+//
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m1x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg4e32_v_u32m2x4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.nxv4i32.i64(<vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], <vscale x 4 x i32> [[TMP6]], <vscale x 4 x i32> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP8]]
+//
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m2x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg4e32_v_f32mf2x4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { 
, , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg4e32_v_i32m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } 
poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg4e32_v_f32m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } 
poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + 
return __riscv_vlsseg4e32_v_f32mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv2f32.i64, policy i64 1, result tuple returned]
+//
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv4f32.i64, policy i64 1, result tuple returned]
+//
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv1i32.i64, policy i64 1, result tuple returned]
+//
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv2i32.i64, policy i64 1, result tuple returned]
+//
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv4i32.i64, policy i64 1, result tuple returned]
+//
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv1i32.i64, policy i64 1, result tuple returned]
+//
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv2i32.i64, policy i64 1, result tuple returned]
+//
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv4i32.i64, policy i64 1, result tuple returned]
+//
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c
index 61ef42459c62c560fcf13aa664ad2ce7685c2c7b..674d1db3111a05ffe23b0f86dd3275c7c497c04b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c
@@ -7,435 +7,435 @@
 
 #include <riscv_vector.h>
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.nxv1f64.i64, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.nxv1f64.i64, result tuple returned]
 //
-void test_vlsseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m1x4_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.nxv2f64.i64, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.nxv2f64.i64, result tuple returned]
 //
-void test_vlsseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m2x4_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.nxv1i64.i64, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.nxv1i64.i64, result tuple returned]
 //
-void test_vlsseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m1x4_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.nxv2i64.i64, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.nxv2i64.i64, result tuple returned]
 //
-void test_vlsseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m2x4_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.nxv1i64.i64, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.nxv1i64.i64, result tuple returned]
 //
-void test_vlsseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m1x4_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.nxv2i64.i64, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.nxv2i64.i64, result tuple returned]
 //
-void test_vlsseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m2x4_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv1f64.i64, policy i64 2, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv1f64.i64, policy i64 2, result tuple returned]
 //
-void test_vlsseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv2f64.i64, policy i64 2, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv2f64.i64, policy i64 2, result tuple returned]
 //
-void test_vlsseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv1i64.i64, policy i64 2, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv1i64.i64, policy i64 2, result tuple returned]
 //
-void test_vlsseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv2i64.i64, policy i64 2, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv2i64.i64, policy i64 2, result tuple returned]
 //
-void test_vlsseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv1i64.i64, policy i64 2, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv1i64.i64, policy i64 2, result tuple returned]
 //
-void test_vlsseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv2i64.i64, policy i64 2, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv2i64.i64, policy i64 2, result tuple returned]
 //
-void test_vlsseg4e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv1f64.i64, policy i64 0, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv1f64.i64, policy i64 0, result tuple returned]
 //
-void test_vlsseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv2f64.i64, policy i64 0, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv2f64.i64, policy i64 0, result tuple returned]
 //
-void test_vlsseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv1i64.i64, policy i64 0, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv1i64.i64, policy i64 0, result tuple returned]
 //
-void test_vlsseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv2i64.i64, policy i64 0, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv2i64.i64, policy i64 0, result tuple returned]
 //
-void test_vlsseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv1i64.i64, policy i64 0, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv1i64.i64, policy i64 0, result tuple returned]
 //
-void test_vlsseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv2i64.i64, policy i64 0, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv2i64.i64, policy i64 0, result tuple returned]
 //
-void test_vlsseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv1f64.i64, policy i64 1, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines: tuple passthru reassembled -> @llvm.riscv.vlsseg4.mask.nxv1f64.i64, policy i64 1, result tuple returned]
 //
-void test_vlsseg4e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// [autogenerated CHECK-RV64 lines: @llvm.riscv.vlsseg4.mask.nxv2f64.i64, policy i64 1, segment fields stored through v0-v3]
+// [autogenerated CHECK-RV64 lines for @test_vlsseg4e64_v_f64m2x4_mu]
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const 
uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c index d91fc5ce70e01c02cd4f9a270caa05cd23a84c61..5e82cc6e56f9fc0c7d0ecf237620a5db9b59de9e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c @@ -1,728 +1,729 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg4e8_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, 
vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr 
noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], 
i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, 
vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef 
[[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], 
i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf8x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { 
, , , } @test_vlsseg4e8_v_i8mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], 
[[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf8x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } 
[[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m2x4_t 
test_vlsseg4e8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } 
[[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf8x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t 
maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg4e8_v_u8m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c index 3987cfb8ea2d9afc27857f4bdd83c11e6ebfd049..fd3daf980e2ad733c42ef30da93ace34a086d342 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c @@ -7,723 +7,723 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, 
vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, 
vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, 
vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, 
vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf2_tum(vfloat16mf2_t 
*v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], 
ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], 
[[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], 
i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local 
void @test_vlsseg5e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } 
@test_vlsseg5e16_v_f16m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf4x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16m1x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } 
[[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_i16m1x5_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_u16mf4x5_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_u16mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_u16m1x5_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_f16m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_i16m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_u16m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_f16mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_f16mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_f16m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_i16mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_i16mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16m1x5_t test_vlsseg5e16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_i16m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_u16mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_u16mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_v_u16m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c
index 894a2f6d8b6dde39ae847bd8fa64e987157060cf..ef99b985833a9bcb585e7eeb837851243a2ff86d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c
@@ -7,483 +7,483 @@
 
 #include 
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_f32mf2x5_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_f32m1x5_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_i32mf2x5_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_i32m1x5_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_u32mf2x5_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_u32m1x5_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_f32mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_f32m1x5_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_i32mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_i32m1x5_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]],
[[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-void test_vlsseg5e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_v_u32m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c
index 3464ee62742abe92d48aa69150accfaa4379bf6f..e6a0fe371a39d30a364212942da07dbca29390ff 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c
@@ -7,243 +7,243 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg5e64_v_f64m1x5_tu
-void test_vlsseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_f64m1x5_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg5e64_v_i64m1x5_tu
-void test_vlsseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_i64m1x5_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg5e64_v_u64m1x5_tu
-void test_vlsseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_u64m1x5_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg5e64_v_f64m1x5_tum
-void test_vlsseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_f64m1x5_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg5e64_v_i64m1x5_tum
-void test_vlsseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_i64m1x5_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg5e64_v_u64m1x5_tum
-void test_vlsseg5e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_u64m1x5_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg5e64_v_f64m1x5_tumu
-void test_vlsseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_f64m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg5e64_v_i64m1x5_tumu
-void test_vlsseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_i64m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg5e64_v_u64m1x5_tumu
-void test_vlsseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_u64m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg5e64_v_f64m1x5_mu
-void test_vlsseg5e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_f64m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg5e64_v_i64m1x5_mu
-void test_vlsseg5e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint64m1x5_t test_vlsseg5e64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_i64m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg5e64_v_u64m1x5_mu
-void test_vlsseg5e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_u64m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
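// ---------------------------------------------------------------------------
// Reviewer sketch, not part of the patch: a minimal caller of the new API
// the regenerated tests above exercise. The removed intrinsics wrote each of
// the five segment fields through a pointer out-parameter; the tuple form
// returns all five fields in one vfloat64m1x5_t. The vget and vfadd calls
// below are assumptions for illustration and do not appear in this diff.
// ---------------------------------------------------------------------------
#include <riscv_vector.h>

vfloat64m1_t sum_first_two_fields(vfloat64m1x5_t maskedoff, const double *base,
                                  ptrdiff_t bstride, size_t vl) {
  // One strided segment load now yields all five fields at once
  // (tail-undisturbed policy, matching the _tu tests above).
  vfloat64m1x5_t seg =
      __riscv_vlsseg5e64_v_f64m1x5_tu(maskedoff, base, bstride, vl);
  // Individual fields are extracted from the tuple instead of being read
  // back through v0..v4 out-pointers.
  vfloat64m1_t f0 = __riscv_vget_v_f64m1x5_f64m1(seg, 0);
  vfloat64m1_t f1 = __riscv_vget_v_f64m1x5_f64m1(seg, 1);
  return __riscv_vfadd_vv_f64m1(f0, f1, vl);
}
// ---------------------------------------------------------------------------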
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c
index a493de7065083a98db9a52254a36d2d6ce1ecd26..b4f06f8e9d96aead2e5369bc21e0c67ae4378078 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c
@@ -1,648 +1,649 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_tu
-void test_vlsseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_tu
-void test_vlsseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_tu
-void test_vlsseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_tu
-void test_vlsseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_tu
-void test_vlsseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_tu
-void test_vlsseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_tu
-void test_vlsseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_tu
-void test_vlsseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_tum
-void test_vlsseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_tum
-void test_vlsseg5e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_tum
-void test_vlsseg5e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_tum
-void test_vlsseg5e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_tum
-void test_vlsseg5e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_tum
-void test_vlsseg5e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8mf4_tum(v0, v1,
v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, 
vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t 
maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// 
CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t 
vl) { + return __riscv_vlsseg5e8_v_u8m1x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_tum(mask, 
maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , 
, } @test_vlsseg5e8_v_u8mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_tumu +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], 
[[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], 
i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg5.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP10]]
+//
+vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8_v_u8mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg5e8_v_u8m1x5_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg5.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP10]]
+//
+vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8_v_u8m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
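
A brief orientation note, since every hunk above follows the same shape: the tuple-based strided segment loads now return one aggregate value instead of writing each field through an output pointer, and the masked policy variants encode the behaviour in the trailing immediate (i64 2 for _tum, i64 0 for _tumu, i64 1 for _mu in the calls above). The sketch below is illustrative only and is not part of this patch; the plain vlsseg5e8 name follows the same pattern as the non-policy tests in this series, and the tuple accessor spelling __riscv_vget_v_u8m1x5_u8m1 is an assumption.

  #include <riscv_vector.h>

  // Strided load of five interleaved byte fields (bstride = record size),
  // then extract field 2 from the returned tuple. The vget spelling here
  // is assumed, not taken from this patch.
  vuint8m1_t load_field2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
    vuint8m1x5_t rec = __riscv_vlsseg5e8_v_u8m1x5(base, bstride, vl);
    return __riscv_vget_v_u8m1x5_u8m1(rec, 2);
  }

  // Policy form exercised in the tests above: _mu keeps inactive lanes
  // from `old` (mask undisturbed, tail agnostic), i.e. the i64 1 operand.
  vuint8m1x5_t load_field_mu(vbool8_t mask, vuint8m1x5_t old,
                             const uint8_t *base, ptrdiff_t bstride,
                             size_t vl) {
    return __riscv_vlsseg5e8_v_u8m1x5_mu(mask, old, base, bstride, vl);
  }
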
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c
index b6ed001a53e97951d09d3e0dc2dbd0f86f4d6902..6b1702848f93659e5fe49ed2a3460954e90761bc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c
@@ -7,795 +7,795 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg6.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], <vscale x 1 x half> [[MASKEDOFF4]], <vscale x 1 x half> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg6.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], <vscale x 2 x half> [[MASKEDOFF4]], <vscale x 2 x half> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], ptr [[V4]], align 2
-//
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - 
return __riscv_vlsseg6e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], 
align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlsseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t 
maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { 
- return __riscv_vlsseg6e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], 
ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t 
*v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } 
[[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } 
@test_vlsseg6e16_v_u16mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr 
[[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], 
[[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t 
test_vlsseg6e16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] 
= extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t 
maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg6e16_v_f16m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c index 1c65f4b3791e76129a18b1175c5ab0202b5035fa..819078f7e96c0ae15de5c26bdd636745f0b303fd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c @@ -7,531 +7,531 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] =
extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32mf2x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], 
[[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg6e32_v_i32m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, 
vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( 
[[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] 
= extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr 
noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t 
vl) { - return __riscv_vlsseg6e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], 
[[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], 
[[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_tumu 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg6e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t 
maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: 
[[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], 
[[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c index a00fe18c6e94638cc4093ea7c76f748bd22c04e3..e2d1c4e424e8cf4393745877082b23511ab0d24f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c @@ -7,267 +7,267 @@ #include -// CHECK-RV64-LABEL: 
define dso_local void @test_vlsseg6e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlsseg6e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_f64m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 
[[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_i64m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] 
= extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_u64m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] 
= insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_f64m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: 
store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
//
-void test_vlsseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e64_v_i64m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
//
-void test_vlsseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e64_v_u64m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
//
-void test_vlsseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e64_v_f64m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
//
-void test_vlsseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e64_v_i64m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
//
-void test_vlsseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e64_v_u64m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
//
-void test_vlsseg6e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e64_v_f64m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
//
-void test_vlsseg6e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e64_v_i64m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
//
-void test_vlsseg6e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e64_v_u64m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c
index 9e50daaf97a3c1d186588f89c9ac1cc407633707..1ba496685f964e9ec80fdacdf5c25667f6dca098 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c
@@ -1,712 +1,713 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include 

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tumu
-// CHECK-RV64-SAME: (ptr
noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// 
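For reference, the shape of the interface the deleted tests above exercise: each of the six segment fields is returned through its own output pointer, the mask and the six per-field maskedoff values are passed individually, and the intrinsic itself returns void, so codegen scatters the result with one store per field, as the deleted CHECK lines show. A minimal caller-side sketch mirroring the removed test_vlsseg6e8_v_u8m1_tumu signature; the wrapper name load_fields_old is illustrative, and this only compiles against the pre-patch headers, since this is exactly the signature the patch removes:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Old shape (removed here): six output pointers, then the mask, six
   maskedoff values, and base/stride/vl; the intrinsic returns void. */
void load_fields_old(vbool8_t mask,
                     vuint8m1_t off0, vuint8m1_t off1, vuint8m1_t off2,
                     vuint8m1_t off3, vuint8m1_t off4, vuint8m1_t off5,
                     const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  vuint8m1_t v0, v1, v2, v3, v4, v5;
  __riscv_vlsseg6e8_v_u8m1_tumu(&v0, &v1, &v2, &v3, &v4, &v5, mask,
                                off0, off1, off2, off3, off4, off5,
                                base, bstride, vl);
  /* v0..v5 now hold the six strided fields; inactive lanes keep the
     maskedoff values per the tumu policy. */
}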
-void test_vlsseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, 
size_t vl) { - return __riscv_vlsseg6e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf8x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], 
[[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf4x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, 
size_t vl) { + return __riscv_vlsseg6e8_v_i8mf2x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8m1x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: 
[[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf8x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf4x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , 
} [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf2x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8m1x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf8x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t 
vl) { + return __riscv_vlsseg6e8_v_i8m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf8x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , 
, } @test_vlsseg6e8_v_i8m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( 
[[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf8x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// 
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf8x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue 
{ , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c index f191ed1545614e1b6b6241e6612b0e7601b353f0..837910f8a44dbd3961c02cb2a1eef1788289f844 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c @@ -7,867 +7,867 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6,
vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const 
int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], 
align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef 
[[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef 
[[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr 
noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef 
[[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef 
[[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr 
noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], 
<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg7.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], <vscale x 2 x i16> [[MASKEDOFF5]], <vscale x 2 x i16> [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg7e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg7.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], <vscale x 4 x i16> [[MASKEDOFF5]], <vscale x 4 x i16> [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]],
align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg7e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg7e16_v_f16mf4x7_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg7.nxv1f16.i64(<vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], <vscale x 1 x half> [[TMP12]], <vscale x 1 x half> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP14]]
+//
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16mf4x7_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg7e16_v_f16mf2x7_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , 
} @llvm.riscv.vlsseg7.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , 
, , } @test_vlsseg7e16_v_u16mf4x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = 
call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t 
test_vlsseg7e16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } 
[[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } 
@test_vlsseg7e16_v_u16m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , 
, , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], 
[[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// 
CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], 
[[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const 
_Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: 
[[TMP13:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg7.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], <vscale x 4 x i16> [[TMP12]], <vscale x 4 x i16> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP14]]
+//
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_v_u16m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c
index 583221914a33b18dada967603b03d563a1743576..fa8a21ffa63f533e773fe628c7c81b8a2400c474 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c
@@ -7,579 +7,579 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg7e32_v_f32mf2x7_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg7.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], <vscale x 1 x float> [[MASKEDOFF4]], <vscale x 1 x float> [[MASKEDOFF5]], <vscale x 1 x float> [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float>
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32mf2x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_tu +// CHECK-RV64-SAME: 
( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32mf2x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32mf2x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t 
maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue 
{ , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
//
-void test_vlsseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e32_v_f32m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
//
-void test_vlsseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e32_v_i32mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
//
-void test_vlsseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+
return __riscv_vlsseg7e32_v_i32m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { 
, , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 
1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, 
vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], 
[[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], 
align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { 
, , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// 
CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
//
-void test_vlsseg7e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e32_v_f32mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]],
[[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], 
[[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , 
, , } @test_vlsseg7e32_v_u32m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, 
const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c index bb9b87c2d3f8e656c5ebcce60b1258ace8bdbb5a..609b03156dea8968bf044c8e0519e21e2f6514b4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c @@ -7,291 +7,291 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]],
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], 
[[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t 
test_vlsseg7e64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: 
[[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, 
vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue 
{ , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr 
[[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64_v_f64m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg7e64_v_i64m1x7_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg7.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], <vscale x 1 x i64> [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg7.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP14]]
 //
-void test_vlsseg7e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint64m1x7_t test_vlsseg7e64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64_v_i64m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg7e64_v_u64m1x7_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg7.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], <vscale x 1 x i64> [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg7.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP14]]
 //
-void test_vlsseg7e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64_v_u64m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c
index 750ab7baefbfdadf53c6539961aa98146ab2d1ac..16024dc5069d97f4b351009d8331406328896cc6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c
@@ -1,776 +1,777 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef
[[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, 
vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 
2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr 
noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], 
ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr 
noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf8x7_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf4x7_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf2x7_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8m1x7_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf8x7_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf4x7_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf2x7_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8m1x7_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf8x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf8x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , ,
} [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8m1x7_tumu(mask, maskedoff_tuple, 
base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf8x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], 
[[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf8x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], 
[[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c index 9329a3a58e222fa51d2dff0961fbc56c4d2b31e4..b44fd86d1d0e6cf0af88b453f4ba1e822ed01c64 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c @@ -7,939 +7,939 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_tu -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf4_tu(vint16mf4_t *v0, 
vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg8e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr 
[[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// 
CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr 
noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf2_tumu(vint16mf2_t 
*v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t 
maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, 
vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], 
align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg8e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , 
} [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , 
, , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } 
@test_vlsseg8e16_v_u16mf4x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], 
[[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr 
[[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// 
CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr 
[[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , 
, } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], 
[[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } 
@test_vlsseg8e16_v_f16mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], 
[[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c index 6bf3968d1b568e04ffaecfbae603399be44fa01b..f4d36e18ab8ba5a4005d384fbe34038cf4cee0df 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c @@ -7,627 +7,627 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vlsseg8e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_tu(vint32m1_t *v0, 
vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP5]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg8.nxv1i32.i64(<vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], <vscale x 1 x i32> [[TMP14]], <vscale x 1 x i32> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP16]]
//
-void test_vlsseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e32_v_u32mf2x8_tu(maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg8e32_v_u32m1x8_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg8.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], <vscale x 2 x i32> [[MASKEDOFF3]], <vscale x 2 x i32> [[MASKEDOFF4]], <vscale x 2 x i32> [[MASKEDOFF5]], <vscale x 2 x i32> [[MASKEDOFF6]], <vscale x 2 x i32> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg8.nxv2i32.i64(<vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], <vscale x 2 x i32> [[TMP14]], <vscale x 2 x i32> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP16]]
//
-void test_vlsseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride,
size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , 
, } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], 
[[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store 
[[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void 
test_vlsseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , 
, } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, 
const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } 
[[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 
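// NOTE: illustrative sketch only, not part of the autogenerated checks. The
// surrounding vlsseg8e32 tests exercise the new tuple-returning segment loads;
// a caller migrates from the deleted pointer-output form roughly as below. The
// tuple-element accessor __riscv_vget_v_i32mf2x8_i32mf2 is assumed to come from
// the same tuple-type revision of <riscv_vector.h>:
//
//   // Old: eight output pointers, no return value.
//   vint32mf2_t v0, v1, v2, v3, v4, v5, v6, v7;
//   __riscv_vlsseg8e32_v_i32mf2(&v0, &v1, &v2, &v3, &v4, &v5, &v6, &v7,
//                               base, bstride, vl);
//
//   // New: one tuple result; individual fields are read back with vget.
//   vint32mf2x8_t vt = __riscv_vlsseg8e32_v_i32mf2x8(base, bstride, vl);
//   vint32mf2_t first = __riscv_vget_v_i32mf2x8_i32mf2(vt, 0);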
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + 
return __riscv_vlsseg8e32_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 
6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void 
test_vlsseg8e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - 
return __riscv_vlsseg8e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8_mu(mask, maskedoff_tuple, base, bstride, 
vl);
 }

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c
index 2a10530070b34c626a966539c3083044edc01956..eb0eee2825fb3460b4512dc72dc38f214ba8e7cf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c
@@ -7,315 +7,315 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg8e64_v_f64m1x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg8.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], <vscale x 1 x double> [[MASKEDOFF3]], <vscale x 1 x double> [[MASKEDOFF4]], <vscale x 1 x double> [[MASKEDOFF5]], <vscale x 1 x double> [[MASKEDOFF6]], <vscale x 1 x double> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg8.nxv1f64.i64(<vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], <vscale x 1 x double> [[TMP12]], <vscale x 1 x double> [[TMP13]], <vscale x 1 x double> [[TMP14]], <vscale x 1 x double> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP16]]
 //
-void test_vlsseg8e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_v_f64m1x8_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg8e64_v_i64m1x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg8.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], <vscale x 1 x i64> [[MASKEDOFF6]], <vscale x 1 x i64> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr 
noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: 
store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , 
, , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , 
} @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], 
[[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg8e64_v_u64m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: 
[[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg8.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP16]]
 //
-void test_vlsseg8e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e64_v_u64m1x8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c
index 1d90ed15e07e891104d5408d078ae6bf1dbc88b6..115b446ccb523a2cf48ceda0f5aa5a4edb473233 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c
@@ -1,840 +1,841 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], <vscale x 1 x i8> [[MASKEDOFF5]], <vscale x 1 x i8> [[MASKEDOFF6]], <vscale x 1 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align
1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], <vscale x 2 x i8> [[MASKEDOFF4]], <vscale x 2 x i8> [[MASKEDOFF5]], <vscale x 2 x i8> [[MASKEDOFF6]], <vscale x 2 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8mf4_tu(v0,
v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr 
[[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t 
*base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr 
[[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: 
define dso_local void @test_vlsseg8e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr 
noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t 
mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg8e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store 
<vscale x 4 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], <vscale x 8 x i8> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], <vscale x 8 x i8> [[MASKEDOFF5]], <vscale x 8 x i8> [[MASKEDOFF6]], <vscale x 8 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg8e8_v_i8mf8x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.nxv1i8.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP16]]
+//
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf8x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg8e8_v_i8mf4x8_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.nxv2i8.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP16]]
+//
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf4x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg8e8_v_i8mf2x8_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.nxv4i8.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP16]]
+//
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return
__riscv_vlsseg8e8_v_i8mf2x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8m1x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], 
[[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( 
[[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8m1x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf8x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], 
[[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , 
} [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], 
[[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , 
, , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf8x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], 
[[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// 
CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], 
[[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c index 8e5c40dedf0ee502b4be907e7a315ef3ffa3d347..bb4c2473b5ebf2fa4380310ce827fe31214919a4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c @@ -7,843 +7,843 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.nxv1f16.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg2e16_v_f16mf2x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.nxv2f16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]],
[[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg2e16_v_f16m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], 
align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlsseg2.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg2e16_v_f16m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]],
i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t 
*v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t 
maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void 
test_vlsseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } 
@test_vlsseg2e16_v_i16m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, 
maskedoff1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 
1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[TMP2]], [[TMP3]], ptr 
[[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg2e16_v_f16mf2x2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg2e16_v_f16m2x2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x half>, <vscale x 16 x half> } @test_vlsseg2e16_v_f16m4x2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0]], <vscale x 16 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } poison, <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[TMP2]], <vscale x 16 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_i16mf4x2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_i16mf2x2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_i16m1x2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_i16m2x2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_i16m4x2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0]], <vscale x 16 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_u16mf4x2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_u16mf2x2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_u16m1x2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_u16m2x2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_u16m4x2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0]], <vscale x 16 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c
index 4a7d3ce81bc530b8b445b3fbb27beeb5dad4b8c0..939a98caf3fea8fcbc4ca22fc27130c21ade1d88 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c
@@ -7,675 +7,675 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg2e32_v_f32mf2x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]]
 //
-void test_vlsseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg2e32_v_f32m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0]], <vscale x 2 x float> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.nxv2f32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP4]]
 //
-void test_vlsseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg2e32_v_f32m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0]], <vscale x 4 x float> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.nxv4f32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP4]]
 //
-void test_vlsseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x float>, <vscale x 8 x float> } @test_vlsseg2e32_v_f32m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF0]], <vscale x 8 x float> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.nxv8f32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP4]]
 //
-void test_vlsseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_i32mf2x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_i32m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_i32m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_i32m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0]], <vscale x 8 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_u32mf2x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_u32m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_u32m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_u32m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0]], <vscale x 8 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg2e32_v_f32mf2x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]]
 //
-void test_vlsseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg2e32_v_f32m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0]], <vscale x 2 x float> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.mask.nxv2f32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP4]]
 //
-void test_vlsseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg2e32_v_f32m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0]], <vscale x 4 x float> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.mask.nxv4f32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP4]]
 //
-void test_vlsseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x float>, <vscale x 8 x float> } @test_vlsseg2e32_v_f32m4x2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF0]], <vscale x 8 x float> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.mask.nxv8f32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP4]]
 //
-void test_vlsseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_i32mf2x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_i32m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_i32m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_i32m4x2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0]], <vscale x 8 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_u32mf2x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_u32m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_u32m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]],
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t 
test_vlsseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vlsseg2e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local { , } @test_vlsseg2e32_v_u32m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, 
v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg2e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_mu +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); 
+vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32_tuple.c
deleted file mode 100644
index e2e22416df76c92b6006b1852fabb29206364382..0000000000000000000000000000000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32_tuple.c
+++ /dev/null
@@ -1,680 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
-// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
-// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tu
-// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret { , } [[TMP4]]
-//
-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tu
-// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret { , } [[TMP4]]
-//
-vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tu
-// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
-//
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define 
dso_local { , } @test_vlsseg2e32_v_i32m2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } 
@test_vlsseg2e32_v_f32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) 
-// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tum -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue 
{ , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// 
CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tumu -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { 
, } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } 
[[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret { , } [[TMP4]]
-//
-vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_mu
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret { , } [[TMP4]]
-//
-vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c
index 34d00f525c3e1eeacf4b96d8cb47894f68748863..64e0eb4076ca1b8a33756e1c17e60277f6704415 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c
@@ -7,507 +7,507 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr
[[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) 
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_i64m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_u64m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.nxv1i64.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_u64m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg2e64_v_f64m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP2]], <vscale x 1 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg2e64_v_f64m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0]], <vscale x 2 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x double>, <vscale x 4 x double> } @test_vlsseg2e64_v_f64m4x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0]], <vscale x 4 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } poison, <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[TMP2]], <vscale x 4 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_i64m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_i64m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_i64m4x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_u64m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_u64m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg2e64_v_f64m1x2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP2]], <vscale x 1 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg2e64_v_f64m2x2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0]], <vscale x 2 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x double>, <vscale x 4 x double> } @test_vlsseg2e64_v_f64m4x2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0]], <vscale x 4 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } poison, <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[TMP2]], <vscale x 4 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_i64m1x2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_i64m2x2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_i64m4x2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_u64m1x2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_u64m2x2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg2e64_v_f64m1x2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP2]], <vscale x 1 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg2e64_v_f64m2x2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0]], <vscale x 2 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x double>, <vscale x 4 x double> } @test_vlsseg2e64_v_f64m4x2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0]], <vscale x 4 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } poison, <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[TMP2]], <vscale x 4 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_i64m1x2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m1x2_t test_vlsseg2e64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_i64m2x2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_i64m4x2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_u64m1x2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c index c5b31c38ce96744d0e8b49be7ca4a2ef4eb82405..c0b6cb32e6e0eaae06f659829df0fea89b114396 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c @@ -1,680 +1,681 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], 
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - 
return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vlsseg2e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], 
ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + 
return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } 
@llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], 
ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c
index 9a6e4218ef78d67f47de44b5d69467e4bc2cd0fb..87b08b269710c641d807258b71f0fbcaa5d54f11 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c
@@ -7,771 +7,771 @@
 #include
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
 //
-void test_vlsseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t 
*v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_i16mf2x3_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.nxv2i16.i64(<vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_i16m1x3_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.nxv4i16.i64(<vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_i16m2x3_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.nxv8i16.i64(<vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_u16mf4x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:
[[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local { , , } @test_vlsseg3e16_v_u16m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg3e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m2_tum(vfloat16m2_t *v0, 
vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, 
maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_tumu 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t 
test_vlsseg3e16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret 
void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , 
, } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], 
ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t 
maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
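Callers migrating off the removed pointer-out form no longer pass three output pointers; the loaded segments come back in the tuple return value. A hedged sketch of the _mu migration; __riscv_vget_v_i16m1x3_i16m1 is assumed to be the tuple accessor shipped with the same tuple-type intrinsics revision and is not part of this diff:

#include <riscv_vector.h>
#include <stddef.h>

void consume(vint16m1_t x, vint16m1_t y, vint16m1_t z);  /* hypothetical */

void migrated_mu(vbool16_t mask, vint16m1x3_t maskedoff,
                 const int16_t *base, ptrdiff_t bstride, size_t vl) {
  /* Old form: __riscv_vlsseg3e16_mu(&v0, &v1, &v2, mask,
                                     off0, off1, off2, base, bstride, vl); */
  vint16m1x3_t v = __riscv_vlsseg3e16_mu(mask, maskedoff, base, bstride, vl);
  consume(__riscv_vget_v_i16m1x3_i16m1(v, 0),
          __riscv_vget_v_i16m1x3_i16m1(v, 1),
          __riscv_vget_v_i16m1x3_i16m1(v, 2));
}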
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr 
noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], 
align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t 
maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c
index f1b315f863ec916bacdd7f2d77f057b52881466a..2048eb56fbc810943a317fc142227f9296ddfba8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c
@@ -7,579 +7,579 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg3e32_v_f32mf2x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.nxv1f32.i64(<vscale x 1 x float> [[TMP3]], <vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]]
 //
-void test_vlsseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]],
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vlsseg3e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t 
maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, 
vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], 
i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg3e32_v_i32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e32_v_u32m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_mu(vbool64_t mask, 
vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 
1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], 
[[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local { , , } @test_vlsseg3e32_v_i32m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t 
maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c index 4f399d307f155a6610e340e31d6aa3e2f0cfa221..61ed04bcc53f3ee308707566c85b08f6122dee07 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c @@ -7,387 +7,387 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e64_v_f64m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vlsseg3e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tum(vint64m1_t *v0, 
vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } 
[[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, 
maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , 
} [[TMP6]] // -void test_vlsseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_tumu +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg3e64_v_u64m2x3_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], <vscale x 2 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
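The hunks above rewrite the vlsseg3e64 policy tests from the pointer-out-parameter form to the tuple-returning form. For orientation, a minimal caller-side sketch of the new API (not part of the patch; it assumes the tuple vget helpers such as __riscv_vget_v_i64m1x3_i64m1 from the same intrinsics revision):

#include <riscv_vector.h>

// Deinterleave three int64 fields laid out with a byte stride (sketch).
void load_xyz(const int64_t *base, ptrdiff_t bstride, size_t vl,
              vint64m1_t *x, vint64m1_t *y, vint64m1_t *z) {
  // One strided segment load now yields all three fields as a single tuple
  // value instead of writing each field through an output pointer.
  vint64m1x3_t v = __riscv_vlsseg3e64_v_i64m1x3(base, bstride, vl);
  *x = __riscv_vget_v_i64m1x3_i64m1(v, 0);
  *y = __riscv_vget_v_i64m1x3_i64m1(v, 1);
  *z = __riscv_vget_v_i64m1x3_i64m1(v, 2);
}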
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c
index 7e74f5efb5bab69c3f878f7c579c2766a1103099..4166c5f3d94ee9e2bdd14539f3bfff9fc138af7a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c
@@ -1,648 +1,649 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_i8mf8x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.nxv1i8.i64(<vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_i8mf4x3_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } 
[[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// 
CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr 
[[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e8_v_i8m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , 
} [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t 
mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } 
[[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]]
//
-void test_vlsseg3e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c
index 749debacf6ba5620134d8b17b02e1518eefef46d..12a6dff404c2586daf11ae13db1609aa9b4ab6da 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c
@@ -7,867 +7,867 @@
#include
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue {
, , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlsseg4e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef 
[[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, 
vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
-// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlsseg4e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: 
define dso_local void @test_vlsseg4e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t 
maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], 
ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 
2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg4e16_v_f16mf4x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.nxv1f16.i64(<vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], <vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP8]]
+//
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg4e16_v_f16mf2x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.nxv2f16.i64(<vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], <vscale x 2 x half> [[TMP6]], <vscale x 2 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP8]]
+//
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg4e16_v_f16m1x4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.nxv4f16.i64(<vscale x 4 x half> [[TMP4]], <vscale x 4 x half> [[TMP5]], <vscale x 4 x half> [[TMP6]], <vscale x 4 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP8]]
+//
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg4e16_v_f16m2x4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.nxv8f16.i64(<vscale x 8 x half> [[TMP4]], <vscale x 8 x half> [[TMP5]], <vscale x 8 x half> [[TMP6]], <vscale x 8 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP8]]
+//
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , }
@test_vlsseg4e16_v_i16mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , 
, } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t 
bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( 
[[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t 
test_vlsseg4e16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
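// A usage sketch for the _tumu form exercised by the tests above (the
// helper name load4_tumu is hypothetical; everything else comes from the
// signatures in these tests): the mask comes first, the whole tuple is the
// merge value, and both masked-off and tail lanes keep the corresponding
// fields of it, matching the trailing `i64 0` policy immediate in the
// generated calls.
#include <stdint.h>
#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical helper: masked strided 4-field segment load, tail and mask
// both undisturbed (inactive lanes keep the fields of `init`).
vint16m1x4_t load4_tumu(vbool16_t mask, vint16m1x4_t init,
                        const int16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg4e16_tumu(mask, init, base, bstride, vl);
}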
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, 
const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c index 137a9d00cbb1aaa3b9216aa9278451f2345d9656..8830f98dafc43f4cdbda316ebbe16d84316330fc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c @@ -7,651 +7,651 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// 
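// Once the load returns a tuple, individual fields come out through the
// tuple accessors instead of through output pointers. A sketch, assuming
// the __riscv_vget tuple accessors ship alongside these tuple types (the
// helper name first_field is hypothetical):
#include <stdint.h>
#include <stddef.h>
#include <riscv_vector.h>

vint16m1_t first_field(vbool16_t mask, vint16m1x4_t init,
                       const int16_t *base, ptrdiff_t bstride, size_t vl) {
  vint16m1x4_t v = __riscv_vlsseg4e16_mu(mask, init, base, bstride, vl);
  return __riscv_vget_v_i16m1x4_i16m1(v, 0);  // field 0 of the x4 tuple
}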
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// 
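// The _tu tests being deleted here used the pre-tuple shape: four output
// pointers plus four separate maskedoff vectors. With tuples the same
// tail-undisturbed load is one value in, one value out; a sketch assuming
// the e32 overloads mirror the e16 ones earlier in this patch (the helper
// name load4_tu is hypothetical):
#include <stddef.h>
#include <riscv_vector.h>

vfloat32m2x4_t load4_tu(vfloat32m2x4_t init, const float *base,
                        ptrdiff_t bstride, size_t vl) {
  // Unmasked, tail undisturbed: tail lanes keep the fields of `init`.
  return __riscv_vlsseg4e32_tu(init, base, bstride, vl);
}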
CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], 
i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// 
-void test_vlsseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef 
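// Reading the trailing policy immediate across these masked calls: _tumu
// tests use `i64 0`, _mu `i64 1`, _tum `i64 2`, and the unmasked _tu form
// carries no policy operand at all. That is consistent with a two-bit
// encoding where bit 0 means tail-agnostic and bit 1 means mask-agnostic
// (an observation from these checks, not a normative definition):
enum rvv_policy_sketch {
  RVV_POLICY_TUMU = 0, /* tail undisturbed, mask undisturbed */
  RVV_POLICY_MU   = 1, /* tail agnostic,    mask undisturbed */
  RVV_POLICY_TUM  = 2, /* tail undisturbed, mask agnostic    */
};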
[[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
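// The mask type in each signature follows SEW/LMUL: for 32-bit elements,
// vbool64_t pairs with mf2 (32 / (1/2) = 64), vbool32_t with m1, and
// vbool16_t with m2, which is why each _tum test here takes a different
// vboolN_t. A sketch at m1, assuming the e32 tuple overloads mirror the
// e16 ones in this patch (the helper name load4_f32m1_tum is hypothetical):
#include <stddef.h>
#include <riscv_vector.h>

vfloat32m1x4_t load4_f32m1_tum(vbool32_t mask, vfloat32m1x4_t init,
                               const float *base, ptrdiff_t bstride,
                               size_t vl) {
  // Tail undisturbed, mask agnostic (policy immediate 2 in the checks).
  return __riscv_vlsseg4e32_tum(mask, init, base, bstride, vl);
}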
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t 
*v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, 
const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], 
i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t 
test_vlsseg4e32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t 
maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 
1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] 
= extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c index fa4116dfd4e4fe0d04c42955ca162f5effc6246f..5cb0703f920ce804eb4d4c30a712696de6247bad 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c @@ -7,435 +7,435 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -//
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , 
, } [[TMP8]] // -void test_vlsseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_tu -// CHECK-RV64-SAME: 
(ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void 
test_vlsseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_tum 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret 
void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, 
vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vlsseg4e64_v_u64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, 
ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg4e64_v_u64m2x4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], <vscale x 2 x i64> [[MASKEDOFF2]], <vscale x 2 x i64> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP8]]
 //
-void test_vlsseg4e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c
index 4f8a663a14f9020b8583498b74945212601bdd90..8ff2e40cca0ef5efe0ccc93b7c0d569fb9bfa84a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c
@@ -1,728 +1,729 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
} [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - 
return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t 
*v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0]], <vscale x 16 x i8> [[MASKEDOFF1]], <vscale x 16 x i8> [[MASKEDOFF2]], <vscale x 16 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], <vscale x 4 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0]], <vscale x 16 x i8> [[MASKEDOFF1]], <vscale x 16 x i8> [[MASKEDOFF2]], <vscale x 16 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base,
bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , 
, , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t 
*base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , 
, } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , 
} @llvm.riscv.vlsseg4.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue 
{ , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t 
maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_u8mf8x4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_u8mf4x4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP8]]
+//
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_u8mf2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_u8m1x4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_u8m2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_i8mf8x4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_i8mf4x4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP8]]
+//
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_i8mf2x4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_i8m1x4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_i8m2x4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_u8mf8x4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_u8mf4x4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP8]]
+//
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_u8mf2x4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_u8m1x4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_u8m2x4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c
index a23b2664913750f1a139f15b8a45f2f1b7b1262c..174241c2a0062f2e62e648b365575d146d200e38 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c
@@ -7,723 +7,723 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], <vscale x 1 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>,
, , , } @llvm.riscv.vlsseg5.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef 
[[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tumu 
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, 
vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, 
vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlsseg5e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr 
[[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg5.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg5e16_v_f16mf4x5_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, 
<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.nxv1f16.i64(<vscale x 1 x half> [[TMP5]], <vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP10]]
+//
+vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg5e16_v_f16mf2x5_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg5.nxv2f16.i64(<vscale x 2 x half> [[TMP5]], <vscale x 2 x half> [[TMP6]], <vscale x 2 x half> [[TMP7]], <vscale x 2 x half> [[TMP8]], <vscale x 2 x half> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP10]]
+//
+vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg5e16_v_f16m1x5_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], 3 
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[TMP5]], 
[[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf4x5_t 
test_vlsseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, 
bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } 
@test_vlsseg5e16_v_i16mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_tum +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], 
[[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , 
, , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c
index 75c4022b3992ddb52737a6166d52e4bc05d8584b..6b85447394058a6a9984eb37c2eece3caf8183f0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c
@@ -7,483 +7,483 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+//
[[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , 
, , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call 
{ , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( 
[[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], 
[[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], 
[[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], 
[[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], 
[[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], 
i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , 
, , } [[TMP10]] // -void test_vlsseg5e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void 
test_vlsseg5e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32mf2_mu(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c index 5c97572801a080994cc04589176d72b48ddcc6d9..96a7c7e9607f4132b05d7d907bfe6484c92c9ad9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c @@ -7,243 +7,243 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void 
test_vlsseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void 
test_vlsseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_u64m1_tum(vuint64m1_t 
*v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, 
vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, 
vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, 
vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, 
vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t 
maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, 
vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c index fa21ff9aa0b9f7758a1215af086bba2b9055887f..fd1f0d20e7baf547afd9af00b9010055bb27862f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c @@ -1,648 +1,649 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], 
[[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr 
noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr 
noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_mu -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define 
dso_local void @test_vlsseg5e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
{ , , , , } @test_vlsseg5e8_v_i8mf4x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } 
[[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } 
[[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] 
= extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = 
call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], 
[[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c index 0bb9934ae8d1ea094c5ceac5decbe93de589e03a..9118fe6e53bc348b8da069945292c70c9411539a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c @@ -7,795 +7,795 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]],
[[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], 
i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr 
noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlsseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
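The tests deleted above exercised the old interface, which returned each of the six segments through a separate output pointer; the tests added below cover the replacement interface, which returns all six segments as one tuple value. A minimal migration sketch, assuming the tuple accessor __riscv_vget_v_f16m1x6_f16m1 from the same tuple-type support (variable and function names here are illustrative only, not part of this patch):

#include <riscv_vector.h>

// Old style (removed): six output pointers, segments written through memory.
//   vfloat16m1_t v0, v1, v2, v3, v4, v5;
//   __riscv_vlsseg6e16_v_f16m1(&v0, &v1, &v2, &v3, &v4, &v5, base, bstride, vl);
//
// New style (added): one tuple return value; fields are read with vget.
vfloat16m1_t first_segment(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  vfloat16m1x6_t seg = __riscv_vlsseg6e16_v_f16m1x6(base, bstride, vl);
  return __riscv_vget_v_f16m1x6_f16m1(seg, 0); // segment 0 of 6
}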
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
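In the masked tuple tests that follow, the trailing immediate on the .mask. intrinsic encodes the policy bits (bit 0 = tail agnostic, bit 1 = mask agnostic): _tum lowers to 2, _tumu to 0, and _mu to 1, matching the immediates seen in the deleted tests above. A hedged sketch of the _tum form (the wrapper name is illustrative; the intrinsic call is taken from the tests below):

// Masked-off and tail elements keep the values carried in maskedoff_tuple.
vfloat16m1x6_t load_seg6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple,
                             const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl);
}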
extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
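+

The _tu tests above exercise the tail-undisturbed form of the new interface: the six passthru vectors travel as a single tuple-typed maskedoff operand and the load returns the tuple directly, instead of writing six results through output pointers. A minimal caller-side sketch of how the returned tuple is consumed (illustration only, not part of this patch; the __riscv_vget_v_i16m1x6_i16m1 accessor and the function name load_first_field_tu are assumptions about the companion tuple vget API, not something this diff defines):

#include <riscv_vector.h>

// Hypothetical caller: strided six-field segment load, tail-undisturbed,
// then extract the first field of the returned tuple.
vint16m1_t load_first_field_tu(vint16m1x6_t prev, const int16_t *base,
                               ptrdiff_t bstride, size_t vl) {
  vint16m1x6_t v = __riscv_vlsseg6e16_tu(prev, base, bstride, vl);
  return __riscv_vget_v_i16m1x6_i16m1(v, 0);  // assumed tuple accessor
}

The masked variants that follow (_tum, _tumu, _mu) prepend a vbool mask operand; in their CHECK lines the trailing immediate on the masked intrinsic call (i64 2, i64 0, i64 1 respectively) is the encoded tail/mask policy.

+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: 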
[[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } 
@test_vlsseg6e16_v_i16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( 
[[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , 
} [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 
0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
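
The next file applies the same rewrite to the overloaded vlsseg6e32 policy tests: the deleted lines show the old interface delivering six results through output pointers, the added lines the tuple-returning replacement. A caller-side migration sketch (illustrative only; the wrapper name strided_load6_tu is an assumption, and the commented pre-patch call only compiles against the headers from before this change):

#include <riscv_vector.h>

// Migration sketch. Pre-patch callers wrote:
//   __riscv_vlsseg6e32_tu(&v0, &v1, &v2, &v3, &v4, &v5,
//                         m0, m1, m2, m3, m4, m5, base, bstride, vl);
// Post-patch the same strided segment load is one tuple in, one tuple out:
vfloat32mf2x6_t strided_load6_tu(vfloat32mf2x6_t maskedoff_tuple,
                                 const float *base, ptrdiff_t bstride,
                                 size_t vl) {
  return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl);
}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c
index 23deb0ae3d2ef6bc708d9a3f4726529b16f31dc0..39caaae3cbdafc7edcaff15e812a77fb232d4abd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c
@@ -7,531 +7,531 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// 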
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr 
noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void 
test_vlsseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue 
{ , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, 
vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr 
[[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t 
maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c index 156f505d07520784a9037f12ed406e73e916ee6a..d5590cf76234fe294c3bd30c0778fae6e43fb016 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c @@ -7,267 +7,267 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_tu +// CHECK-RV64-SAME: ( 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef 
[[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint64m1x6_t 
test_vlsseg6e64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, 
vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } 
poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP12]]
//
-void test_vlsseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg6e64_v_f64m1x6_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], <vscale x 1 x double> [[MASKEDOFF3]], <vscale x 1 x double> [[MASKEDOFF4]], <vscale x 1 x double> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP6]], <vscale x 1 x double> [[TMP7]], <vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP12]]
//
-void test_vlsseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg6e64_v_i64m1x6_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP12]]
//
-void test_vlsseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg6e64_v_u64m1x6_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP12]]
//
-void test_vlsseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg6e64_v_f64m1x6_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], <vscale x 1 x double> [[MASKEDOFF3]], <vscale x 1 x double> [[MASKEDOFF4]], <vscale x 1 x double> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP6]], <vscale x 1 x double> [[TMP7]], <vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP12]]
//
-void test_vlsseg6e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg6e64_v_i64m1x6_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP12]]
//
-void test_vlsseg6e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg6e64_v_u64m1x6_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP12]]
//
-void test_vlsseg6e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c
index dbf7a410a7fb84b96e522bb8d89951beeb317dfa..35566447cfefa8711416a8d883d92e3f86c340e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c
@@ -1,712 +1,713 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], <vscale x 1 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1 -//
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, 
vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg6e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// 
CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef 
[[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, 
vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], 
[[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } 
[[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], 
[[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , 
, , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], 
i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], 
[[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { 
, , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
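The hunks above capture the whole shape of this change for the policy variants: each masked wrapper now takes and returns a single tuple value, and the trailing i64 operand of the @llvm.riscv.vlsseg6.mask.* calls encodes the policy (0 for _tumu, 1 for _mu, 2 for _tum), i.e. the RVV_VTA/RVV_VMA bits from riscv_vector.td. Below is a minimal usage sketch of the tuple-returning API these tests pin down; the deinterleave scenario and the __riscv_vget_v_u8m1x6_u8m1 tuple accessor are assumptions for illustration, not part of this patch.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Sketch: gather field 0 of records that each hold six one-byte fields,
// with consecutive records spaced bstride bytes apart. Assumes the
// __riscv_vget_v_u8m1x6_u8m1 accessor from the tuple-type intrinsics.
void copy_field0(const uint8_t *base, ptrdiff_t bstride,
                 uint8_t *field0_out, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);
    // One tuple result replaces the six output pointers of the old API.
    vuint8m1x6_t rec =
        __riscv_vlsseg6e8_v_u8m1x6(base + (ptrdiff_t)i * bstride, bstride, vl);
    vuint8m1_t f0 = __riscv_vget_v_u8m1x6_u8m1(rec, 0);
    __riscv_vse8_v_u8m1(field0_out + i, f0, vl);
    i += vl;
  }
}

With the tuple as a first-class value, the masked policy overloads take it directly as the maskedoff operand (e.g. __riscv_vlsseg6e8_mu(mask, rec, base, bstride, vl)), instead of threading six output pointers plus six separate maskedoff vectors through every call.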
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c
index 194aed96b182e8f954f0084769e2cb79ccc9984e..c2e9b0675208436e3ff0634110a3b991050be9d4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c
@@ -7,867 +7,867 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]],
0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const 
_Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg7e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 
2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr 
[[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: 
store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store 
[[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , 
, } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: 
store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlsseg7e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg7.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], <vscale x 4 x i16> [[MASKEDOFF5]], <vscale x 4 x i16> [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg7e16_v_f16mf4x7_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg7.nxv1f16.i64(<vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], <vscale x 1 x half> [[TMP12]], <vscale x 1 x half> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP14]]
+//
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg7e16_v_f16mf2x7_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP5]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg7.nxv2f16.i64(<vscale x 2 x half> [[TMP7]], <vscale x 2 x half> [[TMP8]], <vscale x 2 x half> [[TMP9]], <vscale x 2 x half> [[TMP10]], <vscale x 2 x half> [[TMP11]], <vscale x 2 x half> [[TMP12]], <vscale x 2 x half> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP14]]
+//
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg7e16_v_f16m1x7_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP5]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg7.nxv4f16.i64(<vscale x 4 x half> [[TMP7]], <vscale x 4 x half> [[TMP8]], <vscale x 4 x half> [[TMP9]], <vscale x 4 x half> [[TMP10]], <vscale x 4 x half> [[TMP11]], <vscale x 4 x half> [[TMP12]], <vscale x 4 x half> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP14]]
+//
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg7e16_v_i16mf4x7_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP5]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg7.nxv1i16.i64(<vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16> [[TMP9]], <vscale x 1 x i16> [[TMP10]], <vscale x 1 x i16> [[TMP11]], <vscale x 1 x i16> [[TMP12]], <vscale x 1 x i16> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP14]]
+//
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg7e16_v_i16mf2x7_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg7.nxv2i16.i64(<vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], <vscale x 2 x i16> [[TMP10]], <vscale x 2 x i16> [[TMP11]], <vscale x 2 x i16> [[TMP12]], <vscale x 2 x i16> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP14]]
+//
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg7e16_v_i16m1x7_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg7.nxv4i16.i64(<vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], <vscale x 4 x i16> [[TMP12]], <vscale x 4 x i16> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP14]]
+//
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg7e16_v_u16mf4x7_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP5]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue
{ , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t 
maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// 
CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// 
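+// The trailing i64 constant on each llvm.riscv.vlsseg7.mask call is the
+// policy operand: bit 0 set requests a tail-agnostic result and bit 1 set
+// requests a mask-agnostic result, so the _tum checks above expect i64 2,
+// the _tumu checks here expect i64 0, and the _mu checks further down
+// expect i64 1. As an illustration only (not one of the generated tests),
+// a caller that wants both the tail and the masked-off elements taken from
+// the passthru tuple would use the _tumu form:
+//
+//   vfloat16m1x7_t keep_inactive(vbool16_t mask, vfloat16m1x7_t maskedoff,
+//                                const _Float16 *base, ptrdiff_t bstride,
+//                                size_t vl) {
+//     // Inactive (masked-off) and tail elements come from maskedoff.
+//     return __riscv_vlsseg7e16_tumu(mask, maskedoff, base, bstride, vl);
+//   }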
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , 
, } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_tumu 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , 
, , } @test_vlsseg7e16_v_u16m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c index 83f9b819ce4b2b03f5733ae8275cf6f66b9641ef..e6c5dd78899477abed9259146bade53bb2bce238 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c @@ -7,579 +7,579 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t 
vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = 
call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 
3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], 
[[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// 
CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, 
vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr 
[[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, 
vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , 
, , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t 
maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { 
, , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { 
, , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + 
return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 
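
For reference on what these regenerated checks exercise: after this change the masked strided segment loads return one vector-tuple value instead of writing each field through a separate output pointer, so the seven MASKEDOFF arguments collapse into a single tuple operand and the old extractvalue/store tail disappears. A minimal caller sketch in C, assuming the tuple-returning _mu intrinsic named in the tests above and a matching __riscv_vget_v_i32m1x7_i32m1 field accessor (the accessor is an assumption of this sketch, not something this diff adds):

#include <riscv_vector.h>

// Sketch: load seven strided fields in one call; masked-off lanes keep
// the values from maskedoff (mask-undisturbed policy), then extract
// field 0 from the returned tuple.
vint32m1_t first_field_mu(vbool32_t mask, vint32m1x7_t maskedoff,
                          const int32_t *base, ptrdiff_t bstride,
                          size_t vl) {
  vint32m1x7_t rec =
      __riscv_vlsseg7e32_v_i32m1x7_mu(mask, maskedoff, base, bstride, vl);
  return __riscv_vget_v_i32m1x7_i32m1(rec, 0);  // accessor assumed available
}

The overloaded spelling __riscv_vlsseg7e32_mu(mask, maskedoff, base, bstride, vl) shown in the tests should behave the same way; only the intrinsic name differs.
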
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, 
vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
//
-void test_vlsseg7e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c
index 160edc5c5160e259083efcaeec2cb886660aa3d5..175e639a35f0bf7ab9f761ca1129fc48b807fd81 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c
@@ -7,291 +7,291 @@
#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] =
extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tu +// CHECK-RV64-SAME: ( 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// 
CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], 
align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, 
vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], 
[[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store 
[[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t 
*base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: 
[[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 
1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
, , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
//
-void test_vlsseg7e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e64_mu(mask, maskedoff_tuple, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c
index a14b5a59b90ff88e09d3496dc116fb180976b2f3..7125252d0d492b83e81ef9ae95f9516439c2093d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c
@@ -1,776 +1,777 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-//
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local 
void @test_vlsseg7e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// 
CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t 
*v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], 
i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, 
vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } 
[[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], 
ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , 
, } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c index 5c4485f2e0028bb393e9f39c15b9afb6d497512b..678d81a1ef7ac2e713672ebd14f097f7000d7631 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c @@ -7,939 +7,939 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , 
, , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, 
vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_tum -// CHECK-RV64-SAME: 
(ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], 
align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef 
[[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr 
[[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, 
vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue 
{ , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, 
vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const 
int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr 
[[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , 
, } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], 
[[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , 
, , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , 
, , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], 
[[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] 
= call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// 
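NOTE: the _tu/_tum/_tumu tests above all lower the same way: the eight
+// maskedoff fields are reassembled into one aggregate, fed to the
+// @llvm.riscv.vlsseg8 intrinsic (with policy operand i64 2 for _tum and
+// i64 0 for _tumu), and the whole tuple is returned by value instead of
+// being stored through v0..v7 output pointers as the old non-tuple
+// builtins did. A minimal, hedged C usage sketch (the vget spelling is
+// assumed from the tuple intrinsics and is not part of this test file):
+//
+//   #include <riscv_vector.h>
+//
+//   // Strided load of 8 interleaved int16 fields; extract field 0.
+//   vint16m1x8_t rec = __riscv_vlsseg8e16_v_i16m1x8(base, bstride, vl);
+//   vint16m1_t f0 = __riscv_vget_v_i16m1x8_i16m1(rec, 0);
+
+// 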
CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], 
[[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: 
[[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , 
, , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c index 066e71ed19a491474e22fbd514cea0455a194cbf..1815a46083a2878e6eba0671f5819820aabb9eee 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c @@ -7,627 +7,627 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_tu +// CHECK-RV64-SAME: ( 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void 
test_vlsseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { 
+ return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , 
, , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } 
@test_vlsseg8e32_v_f32mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, 
vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { 
+ return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// 
CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void 
test_vlsseg8e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , 
, , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue 
{ , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vlsseg8e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( 
[[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c index 33eb4a2a072a9f0f6482c252971a8b9cfe652995..05a1c3b9df59f3523fe7630369917d8a157b81bf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c @@ -7,315 +7,315 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f64.i64( 
[[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , 
, , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef 
[[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// 
CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// 
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vint64m1x8_t test_vlsseg8e64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
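Reader's note on the API shape these test updates encode: the strided segment loads no longer scatter their results through eight output pointers; each intrinsic now returns one tuple value, and the masked-off operands travel as a single tuple argument. A minimal usage sketch in C follows, under stated assumptions: vfloat16mf4x2_t and __riscv_vlsseg2e16_v_f16mf4x2 appear in this patch, while the __riscv_vget_v_f16mf4x2_f16mf4 accessor and the deinterleave_xy helper name are assumptions for illustration, not part of the patch.

#include <riscv_vector.h>

/* Sketch: load vl {x, y} element pairs spaced bstride bytes apart,
   then split the returned tuple into its two fields. */
static void deinterleave_xy(const _Float16 *base, ptrdiff_t bstride, size_t vl,
                            vfloat16mf4_t *x, vfloat16mf4_t *y) {
  vfloat16mf4x2_t seg = __riscv_vlsseg2e16_v_f16mf4x2(base, bstride, vl);
  *x = __riscv_vget_v_f16mf4x2_f16mf4(seg, 0); /* tuple field 0 */
  *y = __riscv_vget_v_f16mf4x2_f16mf4(seg, 1); /* tuple field 1 */
}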
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c
index 25d501ded86b5f9db75a3dec50ccd3d7832c2871..f6b284b164831473e0df8ba2337bae7a1c39374b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c
@@ -1,840 +1,841 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tum
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t 
maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , 
, , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr 
noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t 
*v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], 
<vscale x 8 x i8> [[MASKEDOFF6]], <vscale x 8 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg8e8_v_i8mf8x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.nxv1i8.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP16]]
+//
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg8e8_v_i8mf4x8_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.nxv2i8.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP16]]
+//
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg8e8_v_i8mf2x8_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.nxv4i8.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP16]]
+//
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg8e8_v_i8m1x8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.nxv8i8.i64(<vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], <vscale x 8 x i8> [[TMP14]], <vscale x 8 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP16]]
+//
+vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg8e8_v_u8mf8x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.nxv1i8.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP16]]
+//
+vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg8e8_v_u8mf4x8_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+//
CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
{ , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], 
[[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], 
[[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tum(vbool16_t mask, 
vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , 
, , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP16]]
+//
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg8e8_v_i8mf4x8_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP16]]
+//
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg8e8_v_i8mf2x8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP16]]
+//
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg8e8_v_i8m1x8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], <vscale x 8 x i8> [[TMP14]], <vscale x 8 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP16]]
+//
+vint8m1x8_t test_vlsseg8e8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg8e8_v_u8mf8x8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP16]]
+//
+vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg8e8_v_u8mf4x8_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP16]]
+//
+vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg8e8_v_u8mf2x8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP16]]
+//
+vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg8e8_v_u8m1x8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], <vscale x 8 x i8> [[TMP14]], <vscale x 8 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP16]]
+//
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
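
Note (illustrative, not part of the patch): the tests above exercise the tuple-based API this change switches to, where a masked-undisturbed (_mu) strided segment load returns a single tuple value instead of writing each segment field through an output pointer. A minimal usage sketch in C, assuming <riscv_vector.h> and the V extension; load_fields_mu is a hypothetical caller, not a name from this patch:

  #include <riscv_vector.h>
  #include <stddef.h>
  #include <stdint.h>

  // Masked-undisturbed strided load of eight interleaved int8 fields:
  // active lanes are loaded from `base` at byte stride `bstride`;
  // inactive lanes keep the corresponding values from `maskedoff`.
  vint8mf8x8_t load_fields_mu(vbool64_t mask, vint8mf8x8_t maskedoff,
                              const int8_t *base, ptrdiff_t bstride,
                              size_t vl) {
    // Same overloaded intrinsic the autogenerated tests call above.
    return __riscv_vlsseg8e8_mu(mask, maskedoff, base, bstride, vl);
  }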